From 3aba4926a2712fc51180fd3a7e28b52219c81451 Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Thu, 7 Aug 2025 21:54:44 -0700 Subject: [PATCH 01/17] Run end of file fixer --- WORKSPACE | 1 - tensorflow_serving/g3doc/saved_model_warmup.md | 2 -- .../servables/tensorflow/testdata/bad_model_config.txt | 2 +- .../servables/tensorflow/testdata/parse_example_tflite.README | 1 - .../saved_model_half_plus_three/00000123/assets/foo.txt | 2 +- .../00000123/assets/foo.txt | 2 +- .../00000124/assets/foo.txt | 2 +- .../saved_model_half_plus_two_cpu/00000123/assets/foo.txt | 2 +- .../saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt | 2 +- .../saved_model_half_plus_two_mkl/00000123/assets/foo.txt | 2 +- .../00000123/assets.extra/mlmd_uuid | 2 +- .../saved_model_half_plus_two_mlmd/00000123/assets/foo.txt | 2 +- .../saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt | 2 +- .../servables/tensorflow/testdata/tf_text_regression.README | 1 - tensorflow_serving/tools/docker/Dockerfile.devel-gpu | 1 - tensorflow_serving/tools/docker/Dockerfile.gpu | 1 - .../util/net_http/client/test_client/public/httpclient.h | 1 - tools/gen_status_stamp.sh | 2 -- 18 files changed, 10 insertions(+), 20 deletions(-) diff --git a/WORKSPACE b/WORKSPACE index f5e8bedf4aa..ea6b2cdb1c9 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -152,4 +152,3 @@ load( ) nccl_configure(name = "local_config_nccl") - diff --git a/tensorflow_serving/g3doc/saved_model_warmup.md b/tensorflow_serving/g3doc/saved_model_warmup.md index 1d4345ae758..838d0966ff8 100644 --- a/tensorflow_serving/g3doc/saved_model_warmup.md +++ b/tensorflow_serving/g3doc/saved_model_warmup.md @@ -48,5 +48,3 @@ Warmup data can be added in two ways: `YourSavedModel/assets.extra/tf_serving_warmup_requests` based on the validation requests provided via [RequestSpec](https://www.tensorflow.org/tfx/guide/infra_validator#requestspec). 
- - diff --git a/tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt b/tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt index 6649d3fd44a..887d07c4356 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt +++ b/tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt @@ -1 +1 @@ -improperly formatted file \ No newline at end of file +improperly formatted file diff --git a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.README b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.README index d3fcb3d8d85..1b6a59f2654 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.README +++ b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.README @@ -19,4 +19,3 @@ and is updated using bazel run -c opt parse_example_tflite_with_string cp /tmp/parse_example_tflite parse_example_tflite/00000123/model.tflite ``` - diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt index f9ff0366880..56c3e54a43f 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt @@ -1 +1 @@ -asset-file-contents \ No newline at end of file +asset-file-contents diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt index f9ff0366880..56c3e54a43f 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt +++ 
b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt @@ -1 +1 @@ -asset-file-contents \ No newline at end of file +asset-file-contents diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt index f9ff0366880..56c3e54a43f 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt @@ -1 +1 @@ -asset-file-contents \ No newline at end of file +asset-file-contents diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt index f9ff0366880..56c3e54a43f 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt @@ -1 +1 @@ -asset-file-contents \ No newline at end of file +asset-file-contents diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt index f9ff0366880..56c3e54a43f 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt @@ -1 +1 @@ -asset-file-contents \ No newline at end of file +asset-file-contents diff --git 
a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt index f9ff0366880..56c3e54a43f 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt @@ -1 +1 @@ -asset-file-contents \ No newline at end of file +asset-file-contents diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets.extra/mlmd_uuid b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets.extra/mlmd_uuid index afc2417480e..cffaea71d35 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets.extra/mlmd_uuid +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets.extra/mlmd_uuid @@ -1 +1 @@ -test_mlmd_uuid \ No newline at end of file +test_mlmd_uuid diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets/foo.txt index f9ff0366880..56c3e54a43f 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets/foo.txt +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets/foo.txt @@ -1 +1 @@ -asset-file-contents \ No newline at end of file +asset-file-contents diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt index f9ff0366880..56c3e54a43f 100644 --- 
a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt @@ -1 +1 @@ -asset-file-contents \ No newline at end of file +asset-file-contents diff --git a/tensorflow_serving/servables/tensorflow/testdata/tf_text_regression.README b/tensorflow_serving/servables/tensorflow/testdata/tf_text_regression.README index faf81a58f1a..038352e0c66 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/tf_text_regression.README +++ b/tensorflow_serving/servables/tensorflow/testdata/tf_text_regression.README @@ -16,4 +16,3 @@ This model is used to test the integration with TF Text, and is updated using this script: https://github.com/tensorflow/text/blob/master/oss_scripts/model_server/save_models.py - diff --git a/tensorflow_serving/tools/docker/Dockerfile.devel-gpu b/tensorflow_serving/tools/docker/Dockerfile.devel-gpu index d33bca9642b..862ec1cd0a6 100644 --- a/tensorflow_serving/tools/docker/Dockerfile.devel-gpu +++ b/tensorflow_serving/tools/docker/Dockerfile.devel-gpu @@ -187,4 +187,3 @@ FROM binary_build as clean_build RUN bazel clean --expunge --color=yes && \ rm -rf /root/.cache CMD ["/bin/bash"] - diff --git a/tensorflow_serving/tools/docker/Dockerfile.gpu b/tensorflow_serving/tools/docker/Dockerfile.gpu index 80b210d5058..fe92f8ab636 100644 --- a/tensorflow_serving/tools/docker/Dockerfile.gpu +++ b/tensorflow_serving/tools/docker/Dockerfile.gpu @@ -87,4 +87,3 @@ tensorflow_model_server --port=8500 --rest_api_port=8501 \ && chmod +x /usr/bin/tf_serving_entrypoint.sh ENTRYPOINT ["/usr/bin/tf_serving_entrypoint.sh"] - diff --git a/tensorflow_serving/util/net_http/client/test_client/public/httpclient.h b/tensorflow_serving/util/net_http/client/test_client/public/httpclient.h index 52b624d9c07..14cb4e51cff 100644 --- a/tensorflow_serving/util/net_http/client/test_client/public/httpclient.h +++ 
b/tensorflow_serving/util/net_http/client/test_client/public/httpclient.h @@ -46,4 +46,3 @@ inline std::unique_ptr CreateEvHTTPConnection( } // namespace tensorflow #endif // THIRD_PARTY_TENSORFLOW_SERVING_UTIL_NET_HTTP_CLIENT_TEST_CLIENT_PUBLIC_HTTPCLIENT_H_ - diff --git a/tools/gen_status_stamp.sh b/tools/gen_status_stamp.sh index f8c840d6d8d..32a73498caa 100755 --- a/tools/gen_status_stamp.sh +++ b/tools/gen_status_stamp.sh @@ -35,5 +35,3 @@ if [ -d .git ] || git rev-parse --git-dir > /dev/null 2>&1; then else echo "BUILD_SCM_REVISION no_git" fi; - - From 215c52e8bafd76e2550dcbd729dbb9f440597454 Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Thu, 7 Aug 2025 21:55:28 -0700 Subject: [PATCH 02/17] Fix trailing whitespace --- .../tensorflow/ops/remote_predict/ops/remote_predict_op.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/ops/remote_predict_op.cc b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/ops/remote_predict_op.cc index def975ac60d..0710192f3e7 100644 --- a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/ops/remote_predict_op.cc +++ b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/ops/remote_predict_op.cc @@ -87,7 +87,7 @@ fail_op_on_rpc_error: If set true, the Op fails if the rpc fails, and returns Set true by default. max_rpc_deadline_millis: The rpc deadline for remote predict. The actual deadline is min(incoming_rpc_deadline, max_rpc_deadline_millis). -signature_name: the signature def for remote graph inference, defaulting to +signature_name: the signature def for remote graph inference, defaulting to "serving_default". target_address: Address of the server hosting the remote graph. model_name: Model name of the remote TF graph. 
@@ -102,7 +102,7 @@ output_tensor_aliases: Tensor of strings for the output tensor alias names to status_code: Returns the status code of the rpc call; basically converting tensorflow::error::Code to it's int value, so 0 means OK. status_error_message: Returns the error message in the rpc status. -output_tensors: Tensors returned by the Predict call on the remote graph, which +output_tensors: Tensors returned by the Predict call on the remote graph, which are in the same order as output_tensor_aliases. output_types: A list of types of the output tensors. Length of this list should be equal to the length of 'output_tensor_aliases'. From 5a515c14b49d246d1b217d361d20c8f6e241f59a Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 20:12:00 -0700 Subject: [PATCH 03/17] Sort Python imports --- tensorflow_serving/apis/model_service_pb2.py | 12 ++++++--- .../apis/model_service_pb2_grpc.py | 8 ++++-- .../apis/prediction_service_pb2.py | 25 +++++++++++++------ .../apis/prediction_service_pb2_grpc.py | 20 +++++++++++---- .../matrix_half_plus_two_saved_model.py | 1 + tensorflow_serving/example/mnist_client.py | 6 ++--- .../example/mnist_saved_model.py | 4 +-- tensorflow_serving/example/resnet_client.py | 2 +- .../example/resnet_client_grpc.py | 5 ++-- tensorflow_serving/example/resnet_warmup.py | 5 ++-- .../example/half_plus_two_with_rpop.py | 4 ++- .../example/half_plus_two_with_rpop_client.py | 3 +-- .../example/remote_predict_client.py | 4 ++- .../tensorflow/ops/remote_predict/__init__.py | 10 +++++--- .../python/ops/remote_predict_ops.py | 11 +++++--- .../model_servers/profiler_client.py | 5 +--- .../tensorflow_model_server_test.py | 25 +++++++++---------- .../tensorflow_model_server_test_client.py | 8 ++---- .../tensorflow_model_server_test_base.py | 12 +++------ .../tensorflow/testdata/export_counter.py | 4 +-- .../testdata/parse_example_tflite.py | 1 - .../testdata/saved_model_half_plus_two.py | 
5 +--- tensorflow_serving/tools/pip_package/setup.py | 3 +-- 23 files changed, 99 insertions(+), 84 deletions(-) diff --git a/tensorflow_serving/apis/model_service_pb2.py b/tensorflow_serving/apis/model_service_pb2.py index 76f1d606cb2..a641dd12404 100644 --- a/tensorflow_serving/apis/model_service_pb2.py +++ b/tensorflow_serving/apis/model_service_pb2.py @@ -18,19 +18,25 @@ # python -m grpc.tools.protoc --python_out=. --grpc_python_out=. -I. tensorflow_serving/apis/model_service.proto import sys + _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pb2 from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() -from tensorflow_serving.apis import get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2 -from tensorflow_serving.apis import model_management_pb2 as tensorflow__serving_dot_apis_dot_model__management__pb2 +from tensorflow_serving.apis import ( + get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2, +) +from tensorflow_serving.apis import ( + model_management_pb2 as tensorflow__serving_dot_apis_dot_model__management__pb2, +) DESCRIPTOR = _descriptor.FileDescriptor( name='tensorflow_serving/apis/model_service.proto', diff --git a/tensorflow_serving/apis/model_service_pb2_grpc.py b/tensorflow_serving/apis/model_service_pb2_grpc.py index 44c578b8648..5b33033407c 100644 --- a/tensorflow_serving/apis/model_service_pb2_grpc.py +++ b/tensorflow_serving/apis/model_service_pb2_grpc.py @@ -19,8 +19,12 @@ import grpc -from tensorflow_serving.apis import get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2 -from tensorflow_serving.apis import 
model_management_pb2 as tensorflow__serving_dot_apis_dot_model__management__pb2 +from tensorflow_serving.apis import ( + get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2, +) +from tensorflow_serving.apis import ( + model_management_pb2 as tensorflow__serving_dot_apis_dot_model__management__pb2, +) class ModelServiceStub(object): diff --git a/tensorflow_serving/apis/prediction_service_pb2.py b/tensorflow_serving/apis/prediction_service_pb2.py index e51a700f720..c0120472c57 100644 --- a/tensorflow_serving/apis/prediction_service_pb2.py +++ b/tensorflow_serving/apis/prediction_service_pb2.py @@ -22,23 +22,34 @@ # source: tensorflow_serving/apis/prediction_service.proto import sys + _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pb2 from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() -from tensorflow_serving.apis import classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2 -from tensorflow_serving.apis import get_model_metadata_pb2 as tensorflow__serving_dot_apis_dot_get__model__metadata__pb2 -from tensorflow_serving.apis import inference_pb2 as tensorflow__serving_dot_apis_dot_inference__pb2 -from tensorflow_serving.apis import predict_pb2 as tensorflow__serving_dot_apis_dot_predict__pb2 -from tensorflow_serving.apis import regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2 - +from tensorflow_serving.apis import ( + classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2, +) +from tensorflow_serving.apis import ( + get_model_metadata_pb2 as tensorflow__serving_dot_apis_dot_get__model__metadata__pb2, +) +from tensorflow_serving.apis 
import ( + inference_pb2 as tensorflow__serving_dot_apis_dot_inference__pb2, +) +from tensorflow_serving.apis import ( + predict_pb2 as tensorflow__serving_dot_apis_dot_predict__pb2, +) +from tensorflow_serving.apis import ( + regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2, +) DESCRIPTOR = _descriptor.FileDescriptor( name='tensorflow_serving/apis/prediction_service.proto', diff --git a/tensorflow_serving/apis/prediction_service_pb2_grpc.py b/tensorflow_serving/apis/prediction_service_pb2_grpc.py index 082f94a39a0..a934f3ec868 100644 --- a/tensorflow_serving/apis/prediction_service_pb2_grpc.py +++ b/tensorflow_serving/apis/prediction_service_pb2_grpc.py @@ -18,11 +18,21 @@ # python -m grpc.tools.protoc --python_out=. --grpc_python_out=. -I. tensorflow_serving/apis/prediction_service.proto import grpc -from tensorflow_serving.apis import classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2 -from tensorflow_serving.apis import get_model_metadata_pb2 as tensorflow__serving_dot_apis_dot_get__model__metadata__pb2 -from tensorflow_serving.apis import inference_pb2 as tensorflow__serving_dot_apis_dot_inference__pb2 -from tensorflow_serving.apis import predict_pb2 as tensorflow__serving_dot_apis_dot_predict__pb2 -from tensorflow_serving.apis import regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2 +from tensorflow_serving.apis import ( + classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2, +) +from tensorflow_serving.apis import ( + get_model_metadata_pb2 as tensorflow__serving_dot_apis_dot_get__model__metadata__pb2, +) +from tensorflow_serving.apis import ( + inference_pb2 as tensorflow__serving_dot_apis_dot_inference__pb2, +) +from tensorflow_serving.apis import ( + predict_pb2 as tensorflow__serving_dot_apis_dot_predict__pb2, +) +from tensorflow_serving.apis import ( + regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2, +) class PredictionServiceStub(object): diff --git 
a/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py b/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py index d111459009c..16962cf221b 100644 --- a/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py +++ b/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py @@ -14,6 +14,7 @@ # ============================================================================== import tensorflow.compat.v1 as tf + FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string("output_dir", "/tmp/matrix_half_plus_two/1", diff --git a/tensorflow_serving/example/mnist_client.py b/tensorflow_serving/example/mnist_client.py index fd90af69b2d..753bf0f7be9 100644 --- a/tensorflow_serving/example/mnist_client.py +++ b/tensorflow_serving/example/mnist_client.py @@ -31,13 +31,11 @@ import threading import grpc +import mnist_input_data import numpy import tensorflow as tf -from tensorflow_serving.apis import predict_pb2 -from tensorflow_serving.apis import prediction_service_pb2_grpc -import mnist_input_data - +from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc tf.compat.v1.app.flags.DEFINE_integer( 'concurrency', 1, 'maximum number of concurrent inference requests') diff --git a/tensorflow_serving/example/mnist_saved_model.py b/tensorflow_serving/example/mnist_saved_model.py index 0bb3053893c..2f70fb7abc9 100644 --- a/tensorflow_serving/example/mnist_saved_model.py +++ b/tensorflow_serving/example/mnist_saved_model.py @@ -30,12 +30,10 @@ import os import sys +import mnist_input_data import tensorflow as tf - from tensorflow.python.ops import lookup_ops -import mnist_input_data - tf.compat.v1.app.flags.DEFINE_integer('training_iteration', 1000, 'number of training iterations.') tf.compat.v1.app.flags.DEFINE_integer('model_version', 1, diff --git a/tensorflow_serving/example/resnet_client.py b/tensorflow_serving/example/resnet_client.py index b531166a91d..4ab300e31ef 100644 --- 
a/tensorflow_serving/example/resnet_client.py +++ b/tensorflow_serving/example/resnet_client.py @@ -34,8 +34,8 @@ import json import numpy as np -from PIL import Image import requests +from PIL import Image # The server URL specifies the endpoint of your server running the ResNet # model with the name "resnet" and using the predict interface. diff --git a/tensorflow_serving/example/resnet_client_grpc.py b/tensorflow_serving/example/resnet_client_grpc.py index 96e22ee8d2c..d0e4c372361 100644 --- a/tensorflow_serving/example/resnet_client_grpc.py +++ b/tensorflow_serving/example/resnet_client_grpc.py @@ -22,12 +22,11 @@ import grpc import numpy as np -from PIL import Image import requests import tensorflow as tf +from PIL import Image -from tensorflow_serving.apis import predict_pb2 -from tensorflow_serving.apis import prediction_service_pb2_grpc +from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc # The image URL is the location of the image we should send to the server IMAGE_URL = 'https://tensorflow.org/images/blogs/serving/cat.jpg' diff --git a/tensorflow_serving/example/resnet_warmup.py b/tensorflow_serving/example/resnet_warmup.py index c7a6af151fb..958f95d95c6 100644 --- a/tensorflow_serving/example/resnet_warmup.py +++ b/tensorflow_serving/example/resnet_warmup.py @@ -38,12 +38,11 @@ import sys import numpy as np -from PIL import Image import requests import tensorflow as tf -from tensorflow_serving.apis import predict_pb2 -from tensorflow_serving.apis import prediction_log_pb2 +from PIL import Image +from tensorflow_serving.apis import predict_pb2, prediction_log_pb2 # IMAGE_URLS are the locations of the images we use to warmup the model IMAGE_URLS = ['https://tensorflow.org/images/blogs/serving/cat.jpg', diff --git a/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py b/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py index 07fe685d139..f90dea3f7dd 100644 --- 
a/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py +++ b/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py @@ -29,7 +29,9 @@ import tensorflow.compat.v1 as tf -from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops import remote_predict_ops +from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops import ( + remote_predict_ops, +) tf.app.flags.DEFINE_string("output_dir", "/tmp/half_plus_two_with_rpop/1/", "Savedmodel export path") diff --git a/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py b/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py index 261c8a029dc..6e5a5e0b2a4 100644 --- a/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py +++ b/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py @@ -41,8 +41,7 @@ import grpc import tensorflow.compat.v1 as tf -from tensorflow_serving.apis import predict_pb2 -from tensorflow_serving.apis import prediction_service_pb2_grpc +from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc tf.app.flags.DEFINE_string('server', 'localhost:8500', 'PredictionService host:port') diff --git a/tensorflow_serving/experimental/example/remote_predict_client.py b/tensorflow_serving/experimental/example/remote_predict_client.py index ccf98a12c2c..740b123f623 100644 --- a/tensorflow_serving/experimental/example/remote_predict_client.py +++ b/tensorflow_serving/experimental/example/remote_predict_client.py @@ -21,7 +21,9 @@ import tensorflow.compat.v1 as tf -from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops import remote_predict_ops +from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops import ( + remote_predict_ops, +) tf.app.flags.DEFINE_string("input_tensor_aliases", "x", "Aliases of input tensors") diff --git a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py 
b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py index c1597decea2..8a468e2d9e0 100644 --- a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py +++ b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py @@ -16,10 +16,12 @@ @@run """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function + from tensorflow.python.util.all_util import remove_undocumented -from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops.remote_predict_ops import run + +from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops.remote_predict_ops import ( + run, +) remove_undocumented(__name__) diff --git a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py index b2854503459..0cf85e484c9 100644 --- a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py +++ b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py @@ -14,16 +14,19 @@ # ============================================================================== """Operations for RemotePredict.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import os.path + import tensorflow.compat.v1 as tf -from tensorflow_serving.experimental.tensorflow.ops.remote_predict.ops import gen_remote_predict_op +from tensorflow_serving.experimental.tensorflow.ops.remote_predict.ops import ( + gen_remote_predict_op, +) + # pylint: disable=wildcard-import from tensorflow_serving.experimental.tensorflow.ops.remote_predict.ops.gen_remote_predict_op import * + # pylint: enable=wildcard-import 
_remote_predict_op_module = tf.load_op_library( diff --git a/tensorflow_serving/model_servers/profiler_client.py b/tensorflow_serving/model_servers/profiler_client.py index c4573842980..1a80d0f8ce3 100644 --- a/tensorflow_serving/model_servers/profiler_client.py +++ b/tensorflow_serving/model_servers/profiler_client.py @@ -14,12 +14,9 @@ # ============================================================================== """Simple client to send profiling request to ModelServer.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import tensorflow as tf - from tensorflow.python.profiler import profiler_client diff --git a/tensorflow_serving/model_servers/tensorflow_model_server_test.py b/tensorflow_serving/model_servers/tensorflow_model_server_test.py index 8ad8f91b153..7670f0c5eda 100644 --- a/tensorflow_serving/model_servers/tensorflow_model_server_test.py +++ b/tensorflow_serving/model_servers/tensorflow_model_server_test.py @@ -15,9 +15,7 @@ """Tests for tensorflow_model_server.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import json import os @@ -25,7 +23,6 @@ import sys import time - # During the creation of the 'tensorflow_serving' package, bazel emits a python # 'tensorflow' module which contains its dependencies (e.g. example.proto). 
# Since bazel python libraries take precedence in sys.path and the created @@ -37,19 +34,21 @@ import grpc -from six.moves import range import tensorflow.compat.v1 as tf - +from six.moves import range from tensorflow.python.platform import flags from tensorflow.python.profiler import profiler_client from tensorflow.python.saved_model import signature_constants -from tensorflow_serving.apis import classification_pb2 -from tensorflow_serving.apis import get_model_metadata_pb2 -from tensorflow_serving.apis import get_model_status_pb2 -from tensorflow_serving.apis import inference_pb2 -from tensorflow_serving.apis import model_service_pb2_grpc -from tensorflow_serving.apis import prediction_service_pb2_grpc -from tensorflow_serving.apis import regression_pb2 + +from tensorflow_serving.apis import ( + classification_pb2, + get_model_metadata_pb2, + get_model_status_pb2, + inference_pb2, + model_service_pb2_grpc, + prediction_service_pb2_grpc, + regression_pb2, +) from tensorflow_serving.model_servers.test_util import tensorflow_model_server_test_base FLAGS = flags.FLAGS diff --git a/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py b/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py index 11274737ec7..1322d5eb167 100644 --- a/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py +++ b/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py @@ -14,18 +14,14 @@ # ============================================================================== """Manual test client for tensorflow_model_server.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import grpc import tensorflow as tf - from tensorflow.core.framework import types_pb2 from tensorflow.python.platform import flags -from tensorflow_serving.apis import predict_pb2 -from tensorflow_serving.apis import 
prediction_service_pb2_grpc +from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc tf.compat.v1.app.flags.DEFINE_string('server', 'localhost:8500', 'inception_inference service host:port') diff --git a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py index 1a0b250a12d..92df34a5b14 100644 --- a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py +++ b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py @@ -15,9 +15,7 @@ """Tests for tensorflow_model_server.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import atexit import json @@ -28,15 +26,13 @@ import time import grpc -from six.moves import range -from six.moves import urllib import tensorflow as tf - +from six.moves import range, urllib from tensorflow.core.framework import types_pb2 from tensorflow.python.platform import flags from tensorflow.python.saved_model import signature_constants -from tensorflow_serving.apis import predict_pb2 -from tensorflow_serving.apis import prediction_service_pb2_grpc + +from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc FLAGS = flags.FLAGS diff --git a/tensorflow_serving/servables/tensorflow/testdata/export_counter.py b/tensorflow_serving/servables/tensorflow/testdata/export_counter.py index c4ada219c92..5cff61650fc 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/export_counter.py +++ b/tensorflow_serving/servables/tensorflow/testdata/export_counter.py @@ -18,9 +18,7 @@ reset_counter, to test Predict service. 
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import tensorflow as tf diff --git a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py index 17790e90b2d..c7c3b8d8d25 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py +++ b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py @@ -34,7 +34,6 @@ import sys import tensorflow.compat.v1 as tf - from tensorflow.lite.tools.signature import signature_def_utils FLAGS = None diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py index fe05c062cf0..1b4a9669763 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py @@ -45,16 +45,13 @@ --device=gpu """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import argparse import os import sys import tensorflow.compat.v1 as tf - from tensorflow.lite.tools.signature import signature_def_utils from tensorflow.python.lib.io import file_io diff --git a/tensorflow_serving/tools/pip_package/setup.py b/tensorflow_serving/tools/pip_package/setup.py index d7b21224ae7..820c9199cc4 100644 --- a/tensorflow_serving/tools/pip_package/setup.py +++ b/tensorflow_serving/tools/pip_package/setup.py @@ -25,8 +25,7 @@ import sys -from setuptools import find_packages -from setuptools import setup +from setuptools import find_packages, setup DOCLINES = __doc__.split('\n') From 82e12a6ddef17c8e0f42b8fd7b91c0ec6a11d5a2 Mon Sep 17 00:00:00 2001 From: smokestacklightnin 
<125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 20:25:48 -0700 Subject: [PATCH 04/17] Make compliant with Pyflakes `F` Ruff rules One `noqa` exception was added --- .../servables/tensorflow/testdata/export_bad_half_plus_two.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_serving/servables/tensorflow/testdata/export_bad_half_plus_two.py b/tensorflow_serving/servables/tensorflow/testdata/export_bad_half_plus_two.py index b5b7bde74c7..5ba302957b9 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/export_bad_half_plus_two.py +++ b/tensorflow_serving/servables/tensorflow/testdata/export_bad_half_plus_two.py @@ -34,7 +34,7 @@ def Export(): # Calculate, y = a*x + b # here we use a placeholder 'x' which is fed at inference time. x = tf.placeholder(tf.float32) - y = tf.add(tf.multiply(a, x), b) + y = tf.add(tf.multiply(a, x), b) # noqa: F841 # Export the model without signatures. # Note that the model is intentionally exported without using exporter, From bdf47e81438414bbaedf4bcca79645ef53fae816 Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 20:41:26 -0700 Subject: [PATCH 05/17] Make compliant with Bug Bear Ruff rules --- .../servables/tensorflow/testdata/saved_model_half_plus_two.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py index 1b4a9669763..1cb226ccdaf 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py @@ -196,7 +196,8 @@ def get_serving_signatures(self): } @tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.float32)]) - def predict(self, x=tf.constant([0], shape=[1], dtype=tf.float32)): + def 
predict(self, x=None): + x = tf.constant([0], shape=[1], dtype=tf.float32) if x is None else x return {"y": self.compute(x, self.b)} @tf.function( From c7c3b555ce0f4b2201c3f8c056ede83e86f6d295 Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 20:47:35 -0700 Subject: [PATCH 06/17] Make compliant with Ruff `SIM` rules --- .../servables/tensorflow/testdata/parse_example_tflite.py | 3 ++- .../servables/tensorflow/testdata/saved_model_half_plus_two.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py index c7c3b8d8d25..2ffa3584a1b 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py +++ b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py @@ -86,7 +86,8 @@ def _generate_tflite_for_parse_example_with_string(export_dir): k = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY tflite_model = signature_def_utils.set_signature_defs( tflite_model, {k: predict_signature_def}) - open(export_dir + "/model.tflite", "wb").write(tflite_model) + with open(export_dir + "/model.tflite", "wb") as fp: + fp.write(tflite_model) def main(_): diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py index 1cb226ccdaf..73d319ebd9f 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py @@ -384,7 +384,8 @@ def _generate_saved_model_for_half_plus_two( k = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY tflite_model = signature_def_utils.set_signature_defs( tflite_model, {k: predict_signature_def}) - open(export_dir + "/model.tflite", 
"wb").write(tflite_model) + with open(export_dir + "/model.tflite", "wb") as fp: + fp.write(tflite_model) else: if use_main_op: builder.add_meta_graph_and_variables( From 436cf9304dc9928cf27010ff18084f1d3aea3aa2 Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 21:02:58 -0700 Subject: [PATCH 07/17] Make compliant with Ruff `D` rules --- .../apis/model_service_pb2_grpc.py | 21 ++++++--------- .../apis/prediction_service_pb2_grpc.py | 27 ++++++++----------- .../matrix_half_plus_two_saved_model.py | 17 ++++++------ tensorflow_serving/example/mnist_client.py | 1 + .../example/mnist_input_data.py | 1 - .../example/resnet_client_grpc.py | 4 +-- tensorflow_serving/example/resnet_warmup.py | 24 ++++++++--------- .../tensorflow_model_server_test.py | 3 --- 8 files changed, 42 insertions(+), 56 deletions(-) diff --git a/tensorflow_serving/apis/model_service_pb2_grpc.py b/tensorflow_serving/apis/model_service_pb2_grpc.py index 5b33033407c..d76b233c500 100644 --- a/tensorflow_serving/apis/model_service_pb2_grpc.py +++ b/tensorflow_serving/apis/model_service_pb2_grpc.py @@ -28,9 +28,7 @@ class ModelServiceStub(object): - """ModelService provides methods to query and update the state of the server, - e.g. which models/versions are being served. - """ + """ModelService provides methods to query and update the state of the server, e.g. which models/versions are being served.""" def __init__(self, channel): """Constructor. @@ -55,24 +53,21 @@ def __init__(self, channel): class ModelServiceServicer(object): - """ModelService provides methods to query and update the state of the server, - e.g. which models/versions are being served. - """ + """ModelService provides methods to query and update the state of the server e.g. which models/versions are being served.""" def GetModelStatus(self, request, context): - """Gets status of model. 
If the ModelSpec in the request does not specify - version, information about all versions of the model will be returned. If - the ModelSpec in the request does specify a version, the status of only - that version will be returned. + """Gets status of model. + + If the ModelSpec in the request does not specify version, information about all versions of the model will be returned. If the ModelSpec in the request does specify a version, the status of only that version will be returned. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def HandleReloadConfigRequest(self, request, context): - """Reloads the set of served models. The new config supersedes the old one, - so if a model is omitted from the new config it will be unloaded and no - longer served. + """Reloads the set of served models. + + The new config supersedes the old one, so if a model is omitted from the new config it will be unloaded and no longer served. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') diff --git a/tensorflow_serving/apis/prediction_service_pb2_grpc.py b/tensorflow_serving/apis/prediction_service_pb2_grpc.py index a934f3ec868..3026d5f7dd9 100644 --- a/tensorflow_serving/apis/prediction_service_pb2_grpc.py +++ b/tensorflow_serving/apis/prediction_service_pb2_grpc.py @@ -36,9 +36,9 @@ class PredictionServiceStub(object): - """open source marker; do not remove - PredictionService provides access to machine-learned models loaded by - model_servers. + """PredictionService provides access to machine-learned models loaded by model_servers. + + open source marker; do not remove """ def __init__(self, channel): @@ -75,42 +75,37 @@ def __init__(self, channel): class PredictionServiceServicer(object): - """open source marker; do not remove - PredictionService provides access to machine-learned models loaded by - model_servers. 
+ """PredictionService provides access to machine-learned models loaded by model_servers. + + open source marker; do not remove """ def Classify(self, request, context): - """Classify. - """ + """Classify.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Regress(self, request, context): - """Regress. - """ + """Regress.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Predict(self, request, context): - """Predict -- provides access to loaded TensorFlow model. - """ + """Predict -- provides access to loaded TensorFlow model.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def MultiInference(self, request, context): - """MultiInference API for multi-headed models. - """ + """MultiInference API for multi-headed models.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetModelMetadata(self, request, context): - """GetModelMetadata - provides access to metadata for loaded models. 
- """ + """GetModelMetadata - provides access to metadata for loaded models.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') diff --git a/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py b/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py index 16962cf221b..2c405dbd459 100644 --- a/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py +++ b/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py @@ -22,14 +22,15 @@ def _generate_saved_model_for_matrix_half_plus_two(export_dir): - """Creates SavedModel for half plus two model that accepts batches of - 3*3 matrices. - The model divides all elements in each matrix by 2 and adds 2 to them. - So, for one input matrix [[1, 2, 3], [4, 5, 6], [7, 8, 9]] - the result will be [[2.5, 3, 3.5], [4, 4.5, 5], [5.5, 6, 6.5]]. - Args: - export_dir: The directory where to write SavedModel files. - """ + """Creates SavedModel for half plus two model that accepts batches of 3*3 matrices. + + The model divides all elements in each matrix by 2 and adds 2 to them. + So, for one input matrix [[1, 2, 3], [4, 5, 6], [7, 8, 9]] + the result will be [[2.5, 3, 3.5], [4, 4.5, 5], [5.5, 6, 6.5]]. + + Args: + export_dir: The directory where to write SavedModel files. + """ builder = tf.saved_model.builder.SavedModelBuilder(export_dir) with tf.Session() as session: x = tf.placeholder(tf.float32, shape=[None, 3, 3], name="x") diff --git a/tensorflow_serving/example/mnist_client.py b/tensorflow_serving/example/mnist_client.py index 753bf0f7be9..9c7a410f2d7 100644 --- a/tensorflow_serving/example/mnist_client.py +++ b/tensorflow_serving/example/mnist_client.py @@ -90,6 +90,7 @@ def _create_rpc_callback(label, result_counter): Args: label: The correct label for the predicted example. result_counter: Counter for the prediction result. 
+ Returns: The callback function. """ diff --git a/tensorflow_serving/example/mnist_input_data.py b/tensorflow_serving/example/mnist_input_data.py index 3e8021a2435..09a3060b845 100644 --- a/tensorflow_serving/example/mnist_input_data.py +++ b/tensorflow_serving/example/mnist_input_data.py @@ -100,7 +100,6 @@ class DataSet(object): def __init__(self, images, labels, fake_data=False, one_hot=False): """Construct a DataSet. one_hot arg is used only if fake_data is true.""" - if fake_data: self._num_examples = 10000 self.one_hot = one_hot diff --git a/tensorflow_serving/example/resnet_client_grpc.py b/tensorflow_serving/example/resnet_client_grpc.py index d0e4c372361..cb6d53ca9a1 100644 --- a/tensorflow_serving/example/resnet_client_grpc.py +++ b/tensorflow_serving/example/resnet_client_grpc.py @@ -12,9 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Send JPEG image to tensorflow_model_server loaded with ResNet model. - -""" +"""Send JPEG image to tensorflow_model_server loaded with ResNet model.""" from __future__ import print_function diff --git a/tensorflow_serving/example/resnet_warmup.py b/tensorflow_serving/example/resnet_warmup.py index 958f95d95c6..462d871a21e 100644 --- a/tensorflow_serving/example/resnet_warmup.py +++ b/tensorflow_serving/example/resnet_warmup.py @@ -14,21 +14,21 @@ # ============================================================================== """Creates the tf_serving_warmup_requests file to warm up a ResNet SavedModel. - 1. Invoke this script passing in the saved_model directory (including version - folder, the folder containing saved_model.pb) as an argument. - 2. Restart tensorflow_model_server. +1. Invoke this script passing in the saved_model directory (including version +folder, the folder containing saved_model.pb) as an argument. +2. Restart tensorflow_model_server. 
- If unsure of the model directory, look for the output: - 'No warmup data file found at' in the tensorflow_model_server - startup log +If unsure of the model directory, look for the output: +'No warmup data file found at' in the tensorflow_model_server +startup log - After the script is run, and tensorflow_model_server is restarted, to verify - it is working look for the output: - 'Starting to read warmup data for model at' and 'Finished reading warmup data - for model at' in the tensorflow_model_server startup log +After the script is run, and tensorflow_model_server is restarted, to verify +it is working look for the output: +'Starting to read warmup data for model at' and 'Finished reading warmup data +for model at' in the tensorflow_model_server startup log - Usage example: - python resnet_warmup.py saved_model_dir +Usage example: +python resnet_warmup.py saved_model_dir """ from __future__ import print_function diff --git a/tensorflow_serving/model_servers/tensorflow_model_server_test.py b/tensorflow_serving/model_servers/tensorflow_model_server_test.py index 7670f0c5eda..28343a2717a 100644 --- a/tensorflow_serving/model_servers/tensorflow_model_server_test.py +++ b/tensorflow_serving/model_servers/tensorflow_model_server_test.py @@ -313,7 +313,6 @@ def testBadModelConfig(self): def testModelConfigReload(self): """Test model server polls filesystem for model configuration.""" - base_config_proto = """ model_config_list: {{ config: {{ @@ -367,7 +366,6 @@ def testModelConfigReload(self): def testModelConfigReloadWithZeroPollPeriod(self): """Test model server does not poll filesystem for model config.""" - base_config_proto = """ model_config_list: {{ config: {{ @@ -734,7 +732,6 @@ def test_distrat_sequential_keras_saved_model_save(self): def test_profiler_service_with_valid_trace_request(self): """Test integration with profiler service by sending tracing requests.""" - # Start model server model_path = self._GetSavedModelBundlePath() _, grpc_addr, rest_addr = 
TensorflowModelServerTest.RunServer( From aa583db3e52f0107ba71ce6259c1b142a317bca8 Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 23:23:12 -0700 Subject: [PATCH 08/17] Make compliant with the Ruff `UP` rule --- .../apis/model_service_pb2_grpc.py | 4 +- .../apis/prediction_service_pb2_grpc.py | 4 +- tensorflow_serving/example/mnist_client.py | 3 +- .../example/mnist_input_data.py | 5 +-- .../example/mnist_saved_model.py | 1 - tensorflow_serving/example/resnet_client.py | 4 +- .../example/resnet_client_grpc.py | 3 +- tensorflow_serving/example/resnet_warmup.py | 1 - .../example/half_plus_two_with_rpop_client.py | 1 - .../example/remote_predict_client.py | 1 - .../tensorflow/ops/remote_predict/__init__.py | 1 - .../python/ops/remote_predict_ops.py | 1 - .../model_servers/profiler_client.py | 1 - .../tensorflow_model_server_test.py | 45 +++++++++---------- .../tensorflow_model_server_test_client.py | 1 - .../tensorflow_model_server_test_base.py | 15 +++---- .../tensorflow/testdata/export_counter.py | 1 - .../testdata/saved_model_half_plus_two.py | 1 - 18 files changed, 36 insertions(+), 57 deletions(-) diff --git a/tensorflow_serving/apis/model_service_pb2_grpc.py b/tensorflow_serving/apis/model_service_pb2_grpc.py index d76b233c500..1db05fc26a2 100644 --- a/tensorflow_serving/apis/model_service_pb2_grpc.py +++ b/tensorflow_serving/apis/model_service_pb2_grpc.py @@ -27,7 +27,7 @@ ) -class ModelServiceStub(object): +class ModelServiceStub: """ModelService provides methods to query and update the state of the server, e.g. which models/versions are being served.""" def __init__(self, channel): @@ -52,7 +52,7 @@ def __init__(self, channel): ) -class ModelServiceServicer(object): +class ModelServiceServicer: """ModelService provides methods to query and update the state of the server e.g. 
which models/versions are being served.""" def GetModelStatus(self, request, context): diff --git a/tensorflow_serving/apis/prediction_service_pb2_grpc.py b/tensorflow_serving/apis/prediction_service_pb2_grpc.py index 3026d5f7dd9..14e18463e09 100644 --- a/tensorflow_serving/apis/prediction_service_pb2_grpc.py +++ b/tensorflow_serving/apis/prediction_service_pb2_grpc.py @@ -35,7 +35,7 @@ ) -class PredictionServiceStub(object): +class PredictionServiceStub: """PredictionService provides access to machine-learned models loaded by model_servers. open source marker; do not remove @@ -74,7 +74,7 @@ def __init__(self, channel): ) -class PredictionServiceServicer(object): +class PredictionServiceServicer: """PredictionService provides access to machine-learned models loaded by model_servers. open source marker; do not remove diff --git a/tensorflow_serving/example/mnist_client.py b/tensorflow_serving/example/mnist_client.py index 9c7a410f2d7..c4d4b5ba581 100644 --- a/tensorflow_serving/example/mnist_client.py +++ b/tensorflow_serving/example/mnist_client.py @@ -25,7 +25,6 @@ mnist_client.py --num_tests=100 --server=localhost:9000 """ -from __future__ import print_function import sys import threading @@ -46,7 +45,7 @@ FLAGS = tf.compat.v1.app.flags.FLAGS -class _ResultCounter(object): +class _ResultCounter: """Counter for the prediction results.""" def __init__(self, num_tests, concurrency): diff --git a/tensorflow_serving/example/mnist_input_data.py b/tensorflow_serving/example/mnist_input_data.py index 09a3060b845..94bdae0db48 100644 --- a/tensorflow_serving/example/mnist_input_data.py +++ b/tensorflow_serving/example/mnist_input_data.py @@ -17,7 +17,6 @@ """Functions for downloading and reading MNIST data.""" -from __future__ import print_function import gzip import os @@ -95,7 +94,7 @@ def extract_labels(filename, one_hot=False): return labels -class DataSet(object): +class DataSet: """Class encompassing test, validation and training MNIST data set.""" def 
__init__(self, images, labels, fake_data=False, one_hot=False): @@ -170,7 +169,7 @@ def next_batch(self, batch_size, fake_data=False): def read_data_sets(train_dir, fake_data=False, one_hot=False): """Return training, validation and testing data sets.""" - class DataSets(object): + class DataSets: pass data_sets = DataSets() diff --git a/tensorflow_serving/example/mnist_saved_model.py b/tensorflow_serving/example/mnist_saved_model.py index 2f70fb7abc9..a920517b2db 100644 --- a/tensorflow_serving/example/mnist_saved_model.py +++ b/tensorflow_serving/example/mnist_saved_model.py @@ -25,7 +25,6 @@ export_dir """ -from __future__ import print_function import os import sys diff --git a/tensorflow_serving/example/resnet_client.py b/tensorflow_serving/example/resnet_client.py index 4ab300e31ef..e6e9163ef9c 100644 --- a/tensorflow_serving/example/resnet_client.py +++ b/tensorflow_serving/example/resnet_client.py @@ -27,7 +27,6 @@ resnet_client.py """ -from __future__ import print_function import base64 import io @@ -79,8 +78,7 @@ def main(): total_time += response.elapsed.total_seconds() prediction = response.json()['predictions'][0] - print('Prediction class: {}, avg latency: {} ms'.format( - np.argmax(prediction), (total_time * 1000) / num_requests)) + print(f'Prediction class: {np.argmax(prediction)}, avg latency: {(total_time * 1000) / num_requests} ms') if __name__ == '__main__': diff --git a/tensorflow_serving/example/resnet_client_grpc.py b/tensorflow_serving/example/resnet_client_grpc.py index cb6d53ca9a1..0d721815260 100644 --- a/tensorflow_serving/example/resnet_client_grpc.py +++ b/tensorflow_serving/example/resnet_client_grpc.py @@ -14,7 +14,6 @@ # ============================================================================== """Send JPEG image to tensorflow_model_server loaded with ResNet model.""" -from __future__ import print_function import io @@ -68,7 +67,7 @@ def main(_): tf.make_tensor_proto(data)) result = stub.Predict(request, 10.0) # 10 secs timeout 
result = result.outputs['activation_49'].float_val - print('Prediction class: {}'.format(np.argmax(result))) + print(f'Prediction class: {np.argmax(result)}') if __name__ == '__main__': diff --git a/tensorflow_serving/example/resnet_warmup.py b/tensorflow_serving/example/resnet_warmup.py index 462d871a21e..a7018f1e76a 100644 --- a/tensorflow_serving/example/resnet_warmup.py +++ b/tensorflow_serving/example/resnet_warmup.py @@ -31,7 +31,6 @@ python resnet_warmup.py saved_model_dir """ -from __future__ import print_function import io import os diff --git a/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py b/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py index 6e5a5e0b2a4..8e7800ad1dc 100644 --- a/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py +++ b/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py @@ -36,7 +36,6 @@ tensorflow_model_server --port=8500 --model_config_file=/tmp/config_file.txt """ -from __future__ import print_function import grpc import tensorflow.compat.v1 as tf diff --git a/tensorflow_serving/experimental/example/remote_predict_client.py b/tensorflow_serving/experimental/example/remote_predict_client.py index 740b123f623..882d9b984e9 100644 --- a/tensorflow_serving/experimental/example/remote_predict_client.py +++ b/tensorflow_serving/experimental/example/remote_predict_client.py @@ -17,7 +17,6 @@ Example client code which calls the Remote Predict Op directly. 
""" -from __future__ import print_function import tensorflow.compat.v1 as tf diff --git a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py index 8a468e2d9e0..11a4bde193b 100644 --- a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py +++ b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py @@ -16,7 +16,6 @@ @@run """ -from __future__ import absolute_import, division, print_function from tensorflow.python.util.all_util import remove_undocumented diff --git a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py index 0cf85e484c9..0093e3af7aa 100644 --- a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py +++ b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py @@ -14,7 +14,6 @@ # ============================================================================== """Operations for RemotePredict.""" -from __future__ import absolute_import, division, print_function import os.path diff --git a/tensorflow_serving/model_servers/profiler_client.py b/tensorflow_serving/model_servers/profiler_client.py index 1a80d0f8ce3..65bb98f361c 100644 --- a/tensorflow_serving/model_servers/profiler_client.py +++ b/tensorflow_serving/model_servers/profiler_client.py @@ -14,7 +14,6 @@ # ============================================================================== """Simple client to send profiling request to ModelServer.""" -from __future__ import absolute_import, division, print_function import tensorflow as tf from tensorflow.python.profiler import profiler_client diff --git a/tensorflow_serving/model_servers/tensorflow_model_server_test.py b/tensorflow_serving/model_servers/tensorflow_model_server_test.py index 
28343a2717a..429e3f150b8 100644 --- a/tensorflow_serving/model_servers/tensorflow_model_server_test.py +++ b/tensorflow_serving/model_servers/tensorflow_model_server_test.py @@ -15,7 +15,6 @@ """Tests for tensorflow_model_server.""" -from __future__ import absolute_import, division, print_function import json import os @@ -73,7 +72,7 @@ def __BuildModelConfigFile(self): in the configuration template file and writes it out to another file used by the test. """ - with open(self._GetGoodModelConfigTemplate(), 'r') as template_file: + with open(self._GetGoodModelConfigTemplate()) as template_file: config = template_file.read().replace('${TEST_HALF_PLUS_TWO_DIR}', self._GetSavedModelBundlePath()) config = config.replace('${TEST_HALF_PLUS_THREE_DIR}', @@ -439,7 +438,7 @@ def testClassifyREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default:classify'.format(host, port) + url = f'http://{host}:{port}/v1/models/default:classify' json_req = {'signature_name': 'classify_x_to_y', 'examples': [{'x': 2.0}]} # Send request @@ -447,7 +446,7 @@ def testClassifyREST(self): try: resp_data = tensorflow_model_server_test_base.CallREST(url, json_req) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') # Verify response self.assertEqual(json.loads(resp_data.decode()), {'results': [[['', 3.0]]]}) @@ -459,7 +458,7 @@ def testRegressREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default:regress'.format(host, port) + url = f'http://{host}:{port}/v1/models/default:regress' json_req = {'signature_name': 'regress_x_to_y', 'examples': [{'x': 2.0}]} # Send request @@ -467,7 +466,7 @@ def testRegressREST(self): try: resp_data = tensorflow_model_server_test_base.CallREST(url, json_req) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request 
failed with error: {e}') # Verify response self.assertEqual(json.loads(resp_data.decode()), {'results': [3.0]}) @@ -479,7 +478,7 @@ def testPredictREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default:predict'.format(host, port) + url = f'http://{host}:{port}/v1/models/default:predict' json_req = {'instances': [2.0, 3.0, 4.0]} # Send request @@ -487,7 +486,7 @@ def testPredictREST(self): try: resp_data = tensorflow_model_server_test_base.CallREST(url, json_req) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') # Verify response self.assertEqual( @@ -500,7 +499,7 @@ def testPredictColumnarREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default:predict'.format(host, port) + url = f'http://{host}:{port}/v1/models/default:predict' json_req = {'inputs': [2.0, 3.0, 4.0]} # Send request @@ -508,7 +507,7 @@ def testPredictColumnarREST(self): try: resp_data = tensorflow_model_server_test_base.CallREST(url, json_req) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') # Verify response self.assertEqual( @@ -521,14 +520,14 @@ def testGetStatusREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default'.format(host, port) + url = f'http://{host}:{port}/v1/models/default' # Send request resp_data = None try: resp_data = tensorflow_model_server_test_base.CallREST(url, None) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') # Verify response self.assertEqual( @@ -550,14 +549,14 @@ def testGetModelMetadataREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default/metadata'.format(host, port) + url = 
f'http://{host}:{port}/v1/models/default/metadata' # Send request resp_data = None try: resp_data = tensorflow_model_server_test_base.CallREST(url, None) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') try: model_metadata_file = self._GetModelMetadataFile() @@ -573,7 +572,7 @@ def testGetModelMetadataREST(self): json.loads(resp_data.decode())), tensorflow_model_server_test_base.SortedObject(expected_metadata)) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') def testPrometheusEndpoint(self): """Test ModelStatus implementation over REST API with columnar inputs.""" @@ -584,14 +583,14 @@ def testPrometheusEndpoint(self): monitoring_config_file=self._GetMonitoringConfigFile())[2].split(':') # Prepare request - url = 'http://{}:{}/monitoring/prometheus/metrics'.format(host, port) + url = f'http://{host}:{port}/monitoring/prometheus/metrics' # Send request resp_data = None try: resp_data = tensorflow_model_server_test_base.CallREST(url, None) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') # Verify that there should be some metric type information. 
self.assertIn('# TYPE', @@ -738,15 +737,13 @@ def test_profiler_service_with_valid_trace_request(self): 'default', model_path) # Prepare predict request - url = 'http://{}/v1/models/default:predict'.format(rest_addr) + url = f'http://{rest_addr}/v1/models/default:predict' json_req = '{"instances": [2.0, 3.0, 4.0]}' # In a subprocess, send a REST predict request every second for 3 seconds - exec_command = ("wget {} --content-on-error=on -O- --post-data '{}' " - "--header='Content-Type:application/json'").format( - url, json_req) - repeat_command = 'for n in {{1..3}}; do {} & sleep 1; done;'.format( - exec_command) + exec_command = (f"wget {url} --content-on-error=on -O- --post-data '{json_req}' " + "--header='Content-Type:application/json'") + repeat_command = f'for n in {{1..3}}; do {exec_command} & sleep 1; done;' proc = subprocess.Popen( repeat_command, shell=True, @@ -766,7 +763,7 @@ def test_profiler_service_with_valid_trace_request(self): # Log stdout & stderr of subprocess issuing predict requests for debugging out, err = proc.communicate() - print("stdout: '{}' | stderr: '{}'".format(out, err)) + print(f"stdout: '{out}' | stderr: '{err}'") def test_tf_text(self): """Test TF Text.""" diff --git a/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py b/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py index 1322d5eb167..e71a22fdce2 100644 --- a/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py +++ b/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py @@ -14,7 +14,6 @@ # ============================================================================== """Manual test client for tensorflow_model_server.""" -from __future__ import absolute_import, division, print_function import grpc import tensorflow as tf diff --git a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py index 
92df34a5b14..607cebf4a0a 100644 --- a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py +++ b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py @@ -15,7 +15,6 @@ """Tests for tensorflow_model_server.""" -from __future__ import absolute_import, division, print_function import atexit import json @@ -81,7 +80,7 @@ def WaitForServerReady(port): try: # Send empty request to missing model - channel = grpc.insecure_channel('localhost:{}'.format(port)) + channel = grpc.insecure_channel(f'localhost:{port}') stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) stub.Predict(request, RPC_TIMEOUT) except grpc.RpcError as error: @@ -95,16 +94,15 @@ def CallREST(url, req, max_attempts=60): """Returns HTTP response body from a REST API call.""" for attempt in range(max_attempts): try: - print('Attempt {}: Sending request to {} with data:\n{}'.format( - attempt, url, req)) + print(f'Attempt {attempt}: Sending request to {url} with data:\n{req}') json_data = json.dumps(req).encode('utf-8') if req is not None else None resp = urllib.request.urlopen(urllib.request.Request(url, data=json_data)) resp_data = resp.read() - print('Received response:\n{}'.format(resp_data)) + print(f'Received response:\n{resp_data}') resp.close() return resp_data except Exception as e: # pylint: disable=broad-except - print('Failed attempt {}. Error: {}'.format(attempt, e)) + print(f'Failed attempt {attempt}. 
Error: {e}') if attempt == max_attempts - 1: raise print('Retrying...') @@ -184,9 +182,8 @@ def RunServer( return TensorflowModelServerTestBase.model_servers_dict[args_key] port = PickUnusedPort() rest_api_port = PickUnusedPort() - print(('Starting test server on port: {} for model_name: ' - '{}/model_config_file: {}'.format(port, model_name, - model_config_file))) + print(f'Starting test server on port: {port} for model_name: ' + f'{model_name}/model_config_file: {model_config_file}') command = os.path.join( TensorflowModelServerTestBase.TestSrcDirPath(model_server_path), diff --git a/tensorflow_serving/servables/tensorflow/testdata/export_counter.py b/tensorflow_serving/servables/tensorflow/testdata/export_counter.py index 5cff61650fc..2ce35d66ce2 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/export_counter.py +++ b/tensorflow_serving/servables/tensorflow/testdata/export_counter.py @@ -18,7 +18,6 @@ reset_counter, to test Predict service. """ -from __future__ import absolute_import, division, print_function import tensorflow as tf diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py index 73d319ebd9f..c0d49b72429 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py @@ -45,7 +45,6 @@ --device=gpu """ -from __future__ import absolute_import, division, print_function import argparse import os From 2cabd3aff8cdaf1980970c76759280da200f07e8 Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 22:46:28 -0700 Subject: [PATCH 09/17] Make compliant with Ruff rule `RET505` --- .../test_util/tensorflow_model_server_test_base.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git 
a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py index 607cebf4a0a..7edfe75efcd 100644 --- a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py +++ b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py @@ -117,8 +117,7 @@ def SortedObject(obj): return sorted(SortedObject(x) for x in obj) if isinstance(obj, tuple): return list(sorted(SortedObject(x) for x in obj)) - else: - return obj + return obj def GetArgsKey(*args, **kwargs): From 69daca2ac8889dd2b137ec61a9c992328be41862 Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 22:48:14 -0700 Subject: [PATCH 10/17] Make compliant with Ruff rule `RET504` --- tensorflow_serving/example/mnist_input_data.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tensorflow_serving/example/mnist_input_data.py b/tensorflow_serving/example/mnist_input_data.py index 94bdae0db48..b9439e28652 100644 --- a/tensorflow_serving/example/mnist_input_data.py +++ b/tensorflow_serving/example/mnist_input_data.py @@ -64,8 +64,7 @@ def extract_images(filename): cols = _read32(bytestream) buf = bytestream.read(rows * cols * num_images) data = numpy.frombuffer(buf, dtype=numpy.uint8) - data = data.reshape(num_images, rows, cols, 1) - return data + return data.reshape(num_images, rows, cols, 1) def dense_to_one_hot(labels_dense, num_classes=10): From 182228e396420373767af4238a593bb2d221595e Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 22:50:52 -0700 Subject: [PATCH 11/17] Make complient with Ruff rule `RET503` --- .../test_util/tensorflow_model_server_test_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py index 7edfe75efcd..09a2b188faa 100644 --- a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py +++ b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py @@ -92,7 +92,7 @@ def WaitForServerReady(port): def CallREST(url, req, max_attempts=60): """Returns HTTP response body from a REST API call.""" - for attempt in range(max_attempts): + for attempt in range(max_attempts): # noqa: RET503 try: print(f'Attempt {attempt}: Sending request to {url} with data:\n{req}') json_data = json.dumps(req).encode('utf-8') if req is not None else None From d177e3d8d9659663a7ffc7bf47a3fd81703abb17 Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 22:58:27 -0700 Subject: [PATCH 12/17] Make compliant with Ruff `NPY002` --- tensorflow_serving/example/mnist_input_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_serving/example/mnist_input_data.py b/tensorflow_serving/example/mnist_input_data.py index b9439e28652..6cdbb9d27aa 100644 --- a/tensorflow_serving/example/mnist_input_data.py +++ b/tensorflow_serving/example/mnist_input_data.py @@ -154,7 +154,7 @@ def next_batch(self, batch_size, fake_data=False): self._epochs_completed += 1 # Shuffle the data perm = numpy.arange(self._num_examples) - numpy.random.shuffle(perm) + numpy.random.default_rng().shuffle(perm) self._images = self._images[perm] self._labels = self._labels[perm] # Start next epoch From a68279fd091483810149038f18ab4086ea5b6ddd Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 23:00:55 -0700 Subject: [PATCH 13/17] Make compliant with Ruff rule `F403` --- .../ops/remote_predict/python/ops/remote_predict_ops.py | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py index 0093e3af7aa..5c613455c8d 100644 --- a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py +++ b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py @@ -24,7 +24,7 @@ ) # pylint: disable=wildcard-import -from tensorflow_serving.experimental.tensorflow.ops.remote_predict.ops.gen_remote_predict_op import * +from tensorflow_serving.experimental.tensorflow.ops.remote_predict.ops.gen_remote_predict_op import * # noqa: F403 # pylint: enable=wildcard-import From 49aa90442534816001aec6123944523686a84166 Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 23:07:08 -0700 Subject: [PATCH 14/17] Make compliant with Ruff rule `ARG005` --- .../model_servers/tensorflow_model_server_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow_serving/model_servers/tensorflow_model_server_test.py b/tensorflow_serving/model_servers/tensorflow_model_server_test.py index 429e3f150b8..ad5626cc64f 100644 --- a/tensorflow_serving/model_servers/tensorflow_model_server_test.py +++ b/tensorflow_serving/model_servers/tensorflow_model_server_test.py @@ -651,9 +651,9 @@ def test_tf_saved_model_save_multiple_signatures(self): base_path = os.path.join(self.get_temp_dir(), 'tf_saved_model_save') export_path = os.path.join(base_path, '00000123') root = tf.train.Checkpoint() - root.f = tf.function(lambda x: {'y': 1.}, + root.f = tf.function(lambda _: {'y': 1.}, input_signature=[tf.TensorSpec(None, tf.float32)]) - root.g = tf.function(lambda x: {'y': 2.}, + root.g = tf.function(lambda _: {'y': 2.}, input_signature=[tf.TensorSpec(None, tf.float32)]) 
tf.saved_model.experimental.save( root, export_path, From 36df528dcb582a0047917a4ef14e0df6b53c8c3e Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 23:11:59 -0700 Subject: [PATCH 15/17] Make compliant with Ruff rule `ARG002` --- tensorflow_serving/apis/model_service_pb2_grpc.py | 4 ++-- tensorflow_serving/apis/prediction_service_pb2_grpc.py | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tensorflow_serving/apis/model_service_pb2_grpc.py b/tensorflow_serving/apis/model_service_pb2_grpc.py index 1db05fc26a2..76b4e1d4bf8 100644 --- a/tensorflow_serving/apis/model_service_pb2_grpc.py +++ b/tensorflow_serving/apis/model_service_pb2_grpc.py @@ -55,7 +55,7 @@ def __init__(self, channel): class ModelServiceServicer: """ModelService provides methods to query and update the state of the server e.g. which models/versions are being served.""" - def GetModelStatus(self, request, context): + def GetModelStatus(self, request, context): # noqa: ARG002 """Gets status of model. If the ModelSpec in the request does not specify version, information about all versions of the model will be returned. If the ModelSpec in the request does specify a version, the status of only that version will be returned. @@ -64,7 +64,7 @@ def GetModelStatus(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') - def HandleReloadConfigRequest(self, request, context): + def HandleReloadConfigRequest(self, request, context): # noqa: ARG002 """Reloads the set of served models. The new config supersedes the old one, so if a model is omitted from the new config it will be unloaded and no longer served. 
diff --git a/tensorflow_serving/apis/prediction_service_pb2_grpc.py b/tensorflow_serving/apis/prediction_service_pb2_grpc.py index 14e18463e09..aea06eb0676 100644 --- a/tensorflow_serving/apis/prediction_service_pb2_grpc.py +++ b/tensorflow_serving/apis/prediction_service_pb2_grpc.py @@ -80,31 +80,31 @@ class PredictionServiceServicer: open source marker; do not remove """ - def Classify(self, request, context): + def Classify(self, request, context): # noqa: ARG002 """Classify.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') - def Regress(self, request, context): + def Regress(self, request, context): # noqa: ARG002 """Regress.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') - def Predict(self, request, context): + def Predict(self, request, context): # noqa: ARG002 """Predict -- provides access to loaded TensorFlow model.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') - def MultiInference(self, request, context): + def MultiInference(self, request, context): # noqa: ARG002 """MultiInference API for multi-headed models.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') - def GetModelMetadata(self, request, context): + def GetModelMetadata(self, request, context): # noqa: ARG002 """GetModelMetadata - provides access to metadata for loaded models.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') From af460eddd380b9d83d90ffe5ec69b08696b91d68 Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 23:13:25 -0700 Subject: [PATCH 16/17] Make 
compliant with Ruff rule `ARG001` --- .../experimental/example/remote_predict_client.py | 2 +- .../servables/tensorflow/testdata/export_counter.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow_serving/experimental/example/remote_predict_client.py b/tensorflow_serving/experimental/example/remote_predict_client.py index 882d9b984e9..7c06b57650a 100644 --- a/tensorflow_serving/experimental/example/remote_predict_client.py +++ b/tensorflow_serving/experimental/example/remote_predict_client.py @@ -41,7 +41,7 @@ FLAGS = tf.app.flags.FLAGS -def main(unused_argv): +def main(unused_argv): # noqa: ARG001 print("Call remote_predict_op") results = remote_predict_ops.run( [FLAGS.input_tensor_aliases], diff --git a/tensorflow_serving/servables/tensorflow/testdata/export_counter.py b/tensorflow_serving/servables/tensorflow/testdata/export_counter.py index 2ce35d66ce2..e25526701a2 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/export_counter.py +++ b/tensorflow_serving/servables/tensorflow/testdata/export_counter.py @@ -94,7 +94,7 @@ def export_model(output_dir): save_model(sess, signature_def_map, output_dir) -def main(unused_argv): +def main(unused_argv): # noqa: ARG001 export_model("/tmp/saved_model_counter/00000123") From 6e42eaa283148340b483d7e2edd7d086c6846678 Mon Sep 17 00:00:00 2001 From: smokestacklightnin <125844868+smokestacklightnin@users.noreply.github.com> Date: Fri, 8 Aug 2025 23:56:35 -0700 Subject: [PATCH 17/17] Make compliant with Ruff rule `UP031` --- .../example/mnist_input_data.py | 17 ++++---- .../example/mnist_saved_model.py | 5 ++- tensorflow_serving/example/resnet_client.py | 2 +- tensorflow_serving/example/resnet_warmup.py | 8 ++-- .../example/half_plus_two_with_rpop.py | 8 +--- .../tensorflow_model_server_test.py | 3 +- .../tensorflow_model_server_test_base.py | 10 ++--- .../testdata/parse_example_tflite.py | 4 +- .../testdata/saved_model_half_plus_two.py | 40 +++++-------------- 9 files changed, 36 
insertions(+), 61 deletions(-) diff --git a/tensorflow_serving/example/mnist_input_data.py b/tensorflow_serving/example/mnist_input_data.py index 6cdbb9d27aa..61b61d167ad 100644 --- a/tensorflow_serving/example/mnist_input_data.py +++ b/tensorflow_serving/example/mnist_input_data.py @@ -41,7 +41,7 @@ def maybe_download(filename, work_directory): if not os.path.exists(filepath): filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath) statinfo = os.stat(filepath) - print('Successfully downloaded %s %d bytes.' % (filename, statinfo.st_size)) + print(f'Successfully downloaded {filename} {statinfo.st_size} bytes.') return filepath @@ -52,13 +52,13 @@ def _read32(bytestream): def extract_images(filename): """Extract the images into a 4D uint8 numpy array [index, y, x, depth].""" - print('Extracting %s' % filename) + print(f'Extracting {filename}') with gzip.open(filename) as bytestream: magic = _read32(bytestream) if magic != 2051: raise ValueError( - 'Invalid magic number %d in MNIST image file: %s' % - (magic, filename)) + f'Invalid magic number {magic} in MNIST image file: {filename}' + ) num_images = _read32(bytestream) rows = _read32(bytestream) cols = _read32(bytestream) @@ -78,13 +78,13 @@ def dense_to_one_hot(labels_dense, num_classes=10): def extract_labels(filename, one_hot=False): """Extract the labels into a 1D uint8 numpy array [index].""" - print('Extracting %s' % filename) + print(f'Extracting {filename}') with gzip.open(filename) as bytestream: magic = _read32(bytestream) if magic != 2049: raise ValueError( - 'Invalid magic number %d in MNIST label file: %s' % - (magic, filename)) + f'Invalid magic number {magic} in MNIST label file: {filename}' + ) num_items = _read32(bytestream) buf = bytestream.read(num_items) labels = numpy.frombuffer(buf, dtype=numpy.uint8) @@ -103,8 +103,7 @@ def __init__(self, images, labels, fake_data=False, one_hot=False): self.one_hot = one_hot else: assert images.shape[0] == labels.shape[0], ( - 'images.shape: 
%s labels.shape: %s' % (images.shape, - labels.shape)) + f'images.shape: {images.shape} labels.shape: {labels.shape}') self._num_examples = images.shape[0] # Convert shape from [num examples, rows, columns, depth] diff --git a/tensorflow_serving/example/mnist_saved_model.py b/tensorflow_serving/example/mnist_saved_model.py index a920517b2db..3cd608ed914 100644 --- a/tensorflow_serving/example/mnist_saved_model.py +++ b/tensorflow_serving/example/mnist_saved_model.py @@ -82,11 +82,12 @@ def main(_): train_step.run(feed_dict={x: batch[0], y_: batch[1]}) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.math.reduce_mean(tf.cast(correct_prediction, 'float')) - print('training accuracy %g' % sess.run( + accuracy_value = sess.run( accuracy, feed_dict={ x: mnist.test.images, y_: mnist.test.labels - })) + }) + print(f'training accuracy {accuracy_value:g}') print('Done training!') # Export model diff --git a/tensorflow_serving/example/resnet_client.py b/tensorflow_serving/example/resnet_client.py index e6e9163ef9c..f04d5dda548 100644 --- a/tensorflow_serving/example/resnet_client.py +++ b/tensorflow_serving/example/resnet_client.py @@ -56,7 +56,7 @@ def main(): if MODEL_ACCEPT_JPG: # Compose a JSON Predict request (send JPEG image in base64). jpeg_bytes = base64.b64encode(dl_request.content).decode('utf-8') - predict_request = '{"instances" : [{"b64": "%s"}]}' % jpeg_bytes + predict_request = f'{{"instances" : [{{"b64": "{jpeg_bytes}"}}]}}' else: # Compose a JOSN Predict request (send the image tensor). jpeg_rgb = Image.open(io.BytesIO(dl_request.content)) diff --git a/tensorflow_serving/example/resnet_warmup.py b/tensorflow_serving/example/resnet_warmup.py index a7018f1e76a..082958e3890 100644 --- a/tensorflow_serving/example/resnet_warmup.py +++ b/tensorflow_serving/example/resnet_warmup.py @@ -62,8 +62,8 @@ def main(): model_dir = sys.argv[-1] if not os.path.isdir(model_dir): - print('The saved model directory: %s does not exist. 
' - 'Specify the path of an existing model.' % model_dir) + print(f'The saved model directory: {model_dir} does not exist. ' + 'Specify the path of an existing model.') sys.exit(-1) # Create the assets.extra directory, assuming model_dir is the versioned @@ -98,8 +98,8 @@ def main(): predict_log=prediction_log_pb2.PredictLog(request=request)) writer.write(log.SerializeToString()) - print('Created the file \'%s\', restart tensorflow_model_server to warmup ' - 'the ResNet SavedModel.' % warmup_file) + print(f'Created the file \'{warmup_file}\', restart tensorflow_model_server to warmup ' + 'the ResNet SavedModel.') if __name__ == '__main__': main() diff --git a/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py b/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py index f90dea3f7dd..936c14311b9 100644 --- a/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py +++ b/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py @@ -129,12 +129,8 @@ def main(_): FLAGS.target_address, FLAGS.remote_model_name) print( - "SavedModel generated at: %(dir)s with target_address: %(target_address)s" - ", remote_model_name: %(remote_model_name)s. " % { - "dir": FLAGS.output_dir, - "target_address": FLAGS.target_address, - "remote_model_name": FLAGS.remote_model_name - }) + f"SavedModel generated at: {FLAGS.output_dir} with target_address: {FLAGS.target_address}" + f", remote_model_name: {FLAGS.remote_model_name}. 
") if __name__ == "__main__": diff --git a/tensorflow_serving/model_servers/tensorflow_model_server_test.py b/tensorflow_serving/model_servers/tensorflow_model_server_test.py index ad5626cc64f..0220f7ae361 100644 --- a/tensorflow_serving/model_servers/tensorflow_model_server_test.py +++ b/tensorflow_serving/model_servers/tensorflow_model_server_test.py @@ -34,7 +34,6 @@ import grpc import tensorflow.compat.v1 as tf -from six.moves import range from tensorflow.python.platform import flags from tensorflow.python.profiler import profiler_client from tensorflow.python.saved_model import signature_constants @@ -600,7 +599,7 @@ def testPredictUDS(self): """Test saved model prediction over a Unix domain socket.""" _ = TensorflowModelServerTest.RunServer('default', self._GetSavedModelBundlePath()) - model_server_address = 'unix:%s' % GRPC_SOCKET_PATH + model_server_address = f'unix:{GRPC_SOCKET_PATH}' self.VerifyPredictRequest( model_server_address, expected_output=3.0, diff --git a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py index 09a2b188faa..b97ce643456 100644 --- a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py +++ b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py @@ -26,7 +26,7 @@ import grpc import tensorflow as tf -from six.moves import range, urllib +from six.moves import urllib from tensorflow.core.framework import types_pb2 from tensorflow.python.platform import flags from tensorflow.python.saved_model import signature_constants @@ -45,8 +45,7 @@ def SetVirtualCpus(num_virtual_cpus): """Create virtual CPU devices if they haven't yet been created.""" if num_virtual_cpus < 1: - raise ValueError('`num_virtual_cpus` must be at least 1 not %r' % - (num_virtual_cpus,)) + raise ValueError(f'`num_virtual_cpus` must be at least 1 not {num_virtual_cpus!r}') physical_devices = 
tf.config.experimental.list_physical_devices('CPU') if not physical_devices: raise RuntimeError('No CPUs found') @@ -59,8 +58,9 @@ def SetVirtualCpus(num_virtual_cpus): physical_devices[0], virtual_devices) else: if len(configs) < num_virtual_cpus: - raise RuntimeError('Already configured with %d < %d virtual CPUs' % - (len(configs), num_virtual_cpus)) + raise RuntimeError( + f'Already configured with {len(configs)} < {num_virtual_cpus} virtual CPUs' + ) def PickUnusedPort(): diff --git a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py index 2ffa3584a1b..86397ada26d 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py +++ b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py @@ -92,9 +92,7 @@ def _generate_tflite_for_parse_example_with_string(export_dir): def main(_): _generate_tflite_for_parse_example_with_string(FLAGS.output_dir) - print("TFLite model generated at: %(dir)s" % { - "dir": FLAGS.output_dir - }) + print(f"TFLite model generated at: {FLAGS.output_dir}") if __name__ == "__main__": diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py index c0d49b72429..36755af3082 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py @@ -410,57 +410,39 @@ def _generate_saved_model_for_half_plus_two( def main(_): _generate_saved_model_for_half_plus_two( FLAGS.output_dir, device_type=FLAGS.device) - print("SavedModel generated for %(device)s at: %(dir)s" % { - "device": FLAGS.device, - "dir": FLAGS.output_dir - }) + print(f"SavedModel generated for {FLAGS.device} at: {FLAGS.output_dir}") _generate_saved_model_for_half_plus_two( - "%s_%s" % (FLAGS.output_dir_tf2, FLAGS.device), + 
f"{FLAGS.output_dir_tf2}_{FLAGS.device}", tf2=True, device_type=FLAGS.device) print( - "SavedModel TF2 generated for %(device)s at: %(dir)s" % { - "device": FLAGS.device, - "dir": "%s_%s" % (FLAGS.output_dir_tf2, FLAGS.device), - }) + "SavedModel TF2 generated for {device} at: {dir}".format( + device=FLAGS.device, + dir=f"{FLAGS.output_dir_tf2}_{FLAGS.device}", + )) _generate_saved_model_for_half_plus_two( FLAGS.output_dir_pbtxt, as_text=True, device_type=FLAGS.device) - print("SavedModel generated for %(device)s at: %(dir)s" % { - "device": FLAGS.device, - "dir": FLAGS.output_dir_pbtxt - }) + print(f"SavedModel generated for {FLAGS.device} at: {FLAGS.output_dir_pbtxt}") _generate_saved_model_for_half_plus_two( FLAGS.output_dir_main_op, use_main_op=True, device_type=FLAGS.device) - print("SavedModel generated for %(device)s at: %(dir)s " % { - "device": FLAGS.device, - "dir": FLAGS.output_dir_main_op - }) + print(f"SavedModel generated for {FLAGS.device} at: {FLAGS.output_dir_main_op} ") _generate_saved_model_for_half_plus_two( FLAGS.output_dir_tflite, as_tflite=True, device_type=FLAGS.device) - print("SavedModel in TFLite format generated for %(device)s at: %(dir)s " % { - "device": FLAGS.device, - "dir": FLAGS.output_dir_tflite, - }) + print(f"SavedModel in TFLite format generated for {FLAGS.device} at: {FLAGS.output_dir_tflite} ") _generate_saved_model_for_half_plus_two( FLAGS.output_dir_mlmd, include_mlmd=True, device_type=FLAGS.device) - print("SavedModel with MLMD generated for %(device)s at: %(dir)s " % { - "device": FLAGS.device, - "dir": FLAGS.output_dir_mlmd, - }) + print(f"SavedModel with MLMD generated for {FLAGS.device} at: {FLAGS.output_dir_mlmd} ") _generate_saved_model_for_half_plus_two( FLAGS.output_dir_tflite_with_sigdef, device_type=FLAGS.device, as_tflite_with_sigdef=True) print("SavedModel in TFLite format with SignatureDef generated for " - "%(device)s at: %(dir)s " % { - "device": FLAGS.device, - "dir": FLAGS.output_dir_tflite_with_sigdef, - 
}) + f"{FLAGS.device} at: {FLAGS.output_dir_tflite_with_sigdef} ") if __name__ == "__main__":