diff --git a/WORKSPACE b/WORKSPACE
index f5e8bedf4aa..ea6b2cdb1c9 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -152,4 +152,3 @@ load(
 )
 
 nccl_configure(name = "local_config_nccl")
-
diff --git a/tensorflow_serving/apis/model_service_pb2.py b/tensorflow_serving/apis/model_service_pb2.py
index 76f1d606cb2..a641dd12404 100644
--- a/tensorflow_serving/apis/model_service_pb2.py
+++ b/tensorflow_serving/apis/model_service_pb2.py
@@ -18,19 +18,25 @@
 # python -m grpc.tools.protoc --python_out=. --grpc_python_out=. -I. tensorflow_serving/apis/model_service.proto
 
 import sys
+
 _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
 from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pb2
 from google.protobuf import message as _message
 from google.protobuf import reflection as _reflection
 from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
+
 # @@protoc_insertion_point(imports)
 
 _sym_db = _symbol_database.Default()
 
-from tensorflow_serving.apis import get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2
-from tensorflow_serving.apis import model_management_pb2 as tensorflow__serving_dot_apis_dot_model__management__pb2
+from tensorflow_serving.apis import (
+    get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2,
+)
+from tensorflow_serving.apis import (
+    model_management_pb2 as tensorflow__serving_dot_apis_dot_model__management__pb2,
+)
 
 DESCRIPTOR = _descriptor.FileDescriptor(
   name='tensorflow_serving/apis/model_service.proto',
diff --git a/tensorflow_serving/apis/model_service_pb2_grpc.py b/tensorflow_serving/apis/model_service_pb2_grpc.py
index 44c578b8648..76b4e1d4bf8 100644
--- a/tensorflow_serving/apis/model_service_pb2_grpc.py
+++ b/tensorflow_serving/apis/model_service_pb2_grpc.py
@@ -19,14 +19,16 @@
 import grpc
 
-from tensorflow_serving.apis import get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2
-from tensorflow_serving.apis import model_management_pb2 as tensorflow__serving_dot_apis_dot_model__management__pb2
+from tensorflow_serving.apis import (
+    get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2,
+)
+from tensorflow_serving.apis import (
+    model_management_pb2 as tensorflow__serving_dot_apis_dot_model__management__pb2,
+)
 
 
-class ModelServiceStub(object):
-  """ModelService provides methods to query and update the state of the server,
-  e.g. which models/versions are being served.
-  """
+class ModelServiceStub:
+  """ModelService provides methods to query and update the state of the server, e.g. which models/versions are being served."""
 
   def __init__(self, channel):
     """Constructor.
@@ -50,25 +52,22 @@ def __init__(self, channel):
     )
 
 
-class ModelServiceServicer(object):
-  """ModelService provides methods to query and update the state of the server,
-  e.g. which models/versions are being served.
-  """
+class ModelServiceServicer:
+  """ModelService provides methods to query and update the state of the server, e.g. which models/versions are being served."""
 
-  def GetModelStatus(self, request, context):
-    """Gets status of model. If the ModelSpec in the request does not specify
-    version, information about all versions of the model will be returned. If
-    the ModelSpec in the request does specify a version, the status of only
-    that version will be returned.
+  def GetModelStatus(self, request, context):  # noqa: ARG002
+    """Gets status of model.
+
+    If the ModelSpec in the request does not specify version, information
+    about all versions of the model will be returned. If the ModelSpec in
+    the request does specify a version, the status of only that version
+    will be returned.
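The reflowed docstrings above describe how GetModelStatus behaves; as a hedged usage sketch of the stub (the server address, model name, and timeout are illustrative assumptions, not part of this change):

```python
# Hypothetical client for the ModelService stub above; assumes a model
# server at localhost:8500 serving a model named "default".
import grpc

from tensorflow_serving.apis import get_model_status_pb2, model_service_pb2_grpc

channel = grpc.insecure_channel('localhost:8500')
stub = model_service_pb2_grpc.ModelServiceStub(channel)

request = get_model_status_pb2.GetModelStatusRequest()
request.model_spec.name = 'default'  # no version set: all versions are returned
response = stub.GetModelStatus(request, 5.0)  # assumed 5 secs timeout
for version_status in response.model_version_status:
  print(version_status.version, version_status.state)
```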
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
 
-  def HandleReloadConfigRequest(self, request, context):
-    """Reloads the set of served models. The new config supersedes the old one,
-    so if a model is omitted from the new config it will be unloaded and no
-    longer served.
+  def HandleReloadConfigRequest(self, request, context):  # noqa: ARG002
+    """Reloads the set of served models.
+
+    The new config supersedes the old one, so if a model is omitted from the
+    new config it will be unloaded and no longer served.
     """
     context.set_code(grpc.StatusCode.UNIMPLEMENTED)
     context.set_details('Method not implemented!')
     raise NotImplementedError('Method not implemented!')
diff --git a/tensorflow_serving/apis/prediction_service_pb2.py b/tensorflow_serving/apis/prediction_service_pb2.py
index e51a700f720..c0120472c57 100644
--- a/tensorflow_serving/apis/prediction_service_pb2.py
+++ b/tensorflow_serving/apis/prediction_service_pb2.py
@@ -22,23 +22,34 @@
 # source: tensorflow_serving/apis/prediction_service.proto
 
 import sys
+
 _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
 from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pb2
 from google.protobuf import message as _message
 from google.protobuf import reflection as _reflection
 from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
+
 # @@protoc_insertion_point(imports)
 
 _sym_db = _symbol_database.Default()
 
-from tensorflow_serving.apis import classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2
-from tensorflow_serving.apis import get_model_metadata_pb2 as tensorflow__serving_dot_apis_dot_get__model__metadata__pb2
-from tensorflow_serving.apis import inference_pb2 as tensorflow__serving_dot_apis_dot_inference__pb2
-from tensorflow_serving.apis import predict_pb2 as tensorflow__serving_dot_apis_dot_predict__pb2
-from tensorflow_serving.apis import regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2
-
+from tensorflow_serving.apis import (
+    classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2,
+)
+from tensorflow_serving.apis import (
+    get_model_metadata_pb2 as tensorflow__serving_dot_apis_dot_get__model__metadata__pb2,
+)
+from tensorflow_serving.apis import (
+    inference_pb2 as tensorflow__serving_dot_apis_dot_inference__pb2,
+)
+from tensorflow_serving.apis import (
+    predict_pb2 as tensorflow__serving_dot_apis_dot_predict__pb2,
+)
+from tensorflow_serving.apis import (
+    regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2,
+)
 
 DESCRIPTOR = _descriptor.FileDescriptor(
   name='tensorflow_serving/apis/prediction_service.proto',
diff --git a/tensorflow_serving/apis/prediction_service_pb2_grpc.py b/tensorflow_serving/apis/prediction_service_pb2_grpc.py
index 082f94a39a0..aea06eb0676 100644
--- a/tensorflow_serving/apis/prediction_service_pb2_grpc.py
+++ b/tensorflow_serving/apis/prediction_service_pb2_grpc.py
@@ -18,17 +18,27 @@
 # python -m grpc.tools.protoc --python_out=. --grpc_python_out=. -I. tensorflow_serving/apis/prediction_service.proto
 
 import grpc
 
-from tensorflow_serving.apis import classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2
-from tensorflow_serving.apis import get_model_metadata_pb2 as tensorflow__serving_dot_apis_dot_get__model__metadata__pb2
-from tensorflow_serving.apis import inference_pb2 as tensorflow__serving_dot_apis_dot_inference__pb2
-from tensorflow_serving.apis import predict_pb2 as tensorflow__serving_dot_apis_dot_predict__pb2
-from tensorflow_serving.apis import regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2
+from tensorflow_serving.apis import (
+    classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2,
+)
+from tensorflow_serving.apis import (
+    get_model_metadata_pb2 as tensorflow__serving_dot_apis_dot_get__model__metadata__pb2,
+)
+from tensorflow_serving.apis import (
+    inference_pb2 as tensorflow__serving_dot_apis_dot_inference__pb2,
+)
+from tensorflow_serving.apis import (
+    predict_pb2 as tensorflow__serving_dot_apis_dot_predict__pb2,
+)
+from tensorflow_serving.apis import (
+    regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2,
+)
 
 
-class PredictionServiceStub(object):
-  """open source marker; do not remove
-  PredictionService provides access to machine-learned models loaded by
-  model_servers.
+class PredictionServiceStub:
+  """PredictionService provides access to machine-learned models loaded by model_servers.
+
+  open source marker; do not remove
   """
 
   def __init__(self, channel):
@@ -64,43 +74,38 @@ def __init__(self, channel):
     )
 
 
-class PredictionServiceServicer(object):
-  """open source marker; do not remove
-  PredictionService provides access to machine-learned models loaded by
-  model_servers.
+class PredictionServiceServicer:
+  """PredictionService provides access to machine-learned models loaded by model_servers.
+
+  open source marker; do not remove
   """
 
-  def Classify(self, request, context):
-    """Classify.
-    """
+  def Classify(self, request, context):  # noqa: ARG002
+    """Classify."""
     context.set_code(grpc.StatusCode.UNIMPLEMENTED)
     context.set_details('Method not implemented!')
     raise NotImplementedError('Method not implemented!')
 
-  def Regress(self, request, context):
-    """Regress.
-    """
+  def Regress(self, request, context):  # noqa: ARG002
+    """Regress."""
     context.set_code(grpc.StatusCode.UNIMPLEMENTED)
     context.set_details('Method not implemented!')
     raise NotImplementedError('Method not implemented!')
 
-  def Predict(self, request, context):
-    """Predict -- provides access to loaded TensorFlow model.
-    """
+  def Predict(self, request, context):  # noqa: ARG002
+    """Predict -- provides access to loaded TensorFlow model."""
     context.set_code(grpc.StatusCode.UNIMPLEMENTED)
     context.set_details('Method not implemented!')
     raise NotImplementedError('Method not implemented!')
 
-  def MultiInference(self, request, context):
-    """MultiInference API for multi-headed models.
-    """
+  def MultiInference(self, request, context):  # noqa: ARG002
+    """MultiInference API for multi-headed models."""
     context.set_code(grpc.StatusCode.UNIMPLEMENTED)
     context.set_details('Method not implemented!')
     raise NotImplementedError('Method not implemented!')
 
-  def GetModelMetadata(self, request, context):
-    """GetModelMetadata - provides access to metadata for loaded models.
-    """
+  def GetModelMetadata(self, request, context):  # noqa: ARG002
+    """GetModelMetadata - provides access to metadata for loaded models."""
     context.set_code(grpc.StatusCode.UNIMPLEMENTED)
     context.set_details('Method not implemented!')
     raise NotImplementedError('Method not implemented!')
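For reference while reviewing the stub changes, a minimal Predict round trip against these APIs might look as follows — the server address, the served name 'half_plus_two', and the input alias 'x' are assumptions, not part of this diff:

```python
# Hypothetical PredictionService client using the stub defined above.
import grpc
import tensorflow as tf

from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc

channel = grpc.insecure_channel('localhost:8500')
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

request = predict_pb2.PredictRequest()
request.model_spec.name = 'half_plus_two'
request.model_spec.signature_name = 'serving_default'
request.inputs['x'].CopyFrom(tf.make_tensor_proto([1.0, 2.0, 3.0]))
response = stub.Predict(request, 10.0)  # 10 secs timeout
print(response.outputs['y'].float_val)  # expect [2.5, 3.0, 3.5]
```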
- """ + def GetModelMetadata(self, request, context): # noqa: ARG002 + """GetModelMetadata - provides access to metadata for loaded models.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') diff --git a/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py b/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py index d111459009c..2c405dbd459 100644 --- a/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py +++ b/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py @@ -14,6 +14,7 @@ # ============================================================================== import tensorflow.compat.v1 as tf + FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string("output_dir", "/tmp/matrix_half_plus_two/1", @@ -21,14 +22,15 @@ def _generate_saved_model_for_matrix_half_plus_two(export_dir): - """Creates SavedModel for half plus two model that accepts batches of - 3*3 matrices. - The model divides all elements in each matrix by 2 and adds 2 to them. - So, for one input matrix [[1, 2, 3], [4, 5, 6], [7, 8, 9]] - the result will be [[2.5, 3, 3.5], [4, 4.5, 5], [5.5, 6, 6.5]]. - Args: - export_dir: The directory where to write SavedModel files. - """ + """Creates SavedModel for half plus two model that accepts batches of 3*3 matrices. + + The model divides all elements in each matrix by 2 and adds 2 to them. + So, for one input matrix [[1, 2, 3], [4, 5, 6], [7, 8, 9]] + the result will be [[2.5, 3, 3.5], [4, 4.5, 5], [5.5, 6, 6.5]]. + + Args: + export_dir: The directory where to write SavedModel files. + """ builder = tf.saved_model.builder.SavedModelBuilder(export_dir) with tf.Session() as session: x = tf.placeholder(tf.float32, shape=[None, 3, 3], name="x") diff --git a/tensorflow_serving/example/mnist_client.py b/tensorflow_serving/example/mnist_client.py index fd90af69b2d..c4d4b5ba581 100644 --- a/tensorflow_serving/example/mnist_client.py +++ b/tensorflow_serving/example/mnist_client.py @@ -25,19 +25,16 @@ mnist_client.py --num_tests=100 --server=localhost:9000 """ -from __future__ import print_function import sys import threading import grpc +import mnist_input_data import numpy import tensorflow as tf -from tensorflow_serving.apis import predict_pb2 -from tensorflow_serving.apis import prediction_service_pb2_grpc -import mnist_input_data - +from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc tf.compat.v1.app.flags.DEFINE_integer( 'concurrency', 1, 'maximum number of concurrent inference requests') @@ -48,7 +45,7 @@ FLAGS = tf.compat.v1.app.flags.FLAGS -class _ResultCounter(object): +class _ResultCounter: """Counter for the prediction results.""" def __init__(self, num_tests, concurrency): @@ -92,6 +89,7 @@ def _create_rpc_callback(label, result_counter): Args: label: The correct label for the predicted example. result_counter: Counter for the prediction result. + Returns: The callback function. 
""" diff --git a/tensorflow_serving/example/mnist_input_data.py b/tensorflow_serving/example/mnist_input_data.py index 3e8021a2435..61b61d167ad 100644 --- a/tensorflow_serving/example/mnist_input_data.py +++ b/tensorflow_serving/example/mnist_input_data.py @@ -17,7 +17,6 @@ """Functions for downloading and reading MNIST data.""" -from __future__ import print_function import gzip import os @@ -42,7 +41,7 @@ def maybe_download(filename, work_directory): if not os.path.exists(filepath): filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath) statinfo = os.stat(filepath) - print('Successfully downloaded %s %d bytes.' % (filename, statinfo.st_size)) + print(f'Successfully downloaded {filename} {statinfo.st_size} bytes.') return filepath @@ -53,20 +52,19 @@ def _read32(bytestream): def extract_images(filename): """Extract the images into a 4D uint8 numpy array [index, y, x, depth].""" - print('Extracting %s' % filename) + print(f'Extracting {filename}') with gzip.open(filename) as bytestream: magic = _read32(bytestream) if magic != 2051: raise ValueError( - 'Invalid magic number %d in MNIST image file: %s' % - (magic, filename)) + f'Invalid magic number {magic} in MNIST image file: {filename}' + ) num_images = _read32(bytestream) rows = _read32(bytestream) cols = _read32(bytestream) buf = bytestream.read(rows * cols * num_images) data = numpy.frombuffer(buf, dtype=numpy.uint8) - data = data.reshape(num_images, rows, cols, 1) - return data + return data.reshape(num_images, rows, cols, 1) def dense_to_one_hot(labels_dense, num_classes=10): @@ -80,13 +78,13 @@ def dense_to_one_hot(labels_dense, num_classes=10): def extract_labels(filename, one_hot=False): """Extract the labels into a 1D uint8 numpy array [index].""" - print('Extracting %s' % filename) + print(f'Extracting {filename}') with gzip.open(filename) as bytestream: magic = _read32(bytestream) if magic != 2049: raise ValueError( - 'Invalid magic number %d in MNIST label file: %s' % - (magic, filename)) + f'Invalid magic number {magic} in MNIST label file: {filename}' + ) num_items = _read32(bytestream) buf = bytestream.read(num_items) labels = numpy.frombuffer(buf, dtype=numpy.uint8) @@ -95,19 +93,17 @@ def extract_labels(filename, one_hot=False): return labels -class DataSet(object): +class DataSet: """Class encompassing test, validation and training MNIST data set.""" def __init__(self, images, labels, fake_data=False, one_hot=False): """Construct a DataSet. 
diff --git a/tensorflow_serving/example/mnist_saved_model.py b/tensorflow_serving/example/mnist_saved_model.py
index 0bb3053893c..3cd608ed914 100644
--- a/tensorflow_serving/example/mnist_saved_model.py
+++ b/tensorflow_serving/example/mnist_saved_model.py
@@ -25,17 +25,14 @@
 export_dir
 """
 
-from __future__ import print_function
 
 import os
 import sys
 
+import mnist_input_data
 import tensorflow as tf
-
 from tensorflow.python.ops import lookup_ops
 
-import mnist_input_data
-
 tf.compat.v1.app.flags.DEFINE_integer('training_iteration', 1000,
                                       'number of training iterations.')
 tf.compat.v1.app.flags.DEFINE_integer('model_version', 1,
@@ -85,11 +82,12 @@ def main(_):
     train_step.run(feed_dict={x: batch[0], y_: batch[1]})
   correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
   accuracy = tf.math.reduce_mean(tf.cast(correct_prediction, 'float'))
-  print('training accuracy %g' % sess.run(
+  accuracy_value = sess.run(
       accuracy, feed_dict={
           x: mnist.test.images,
           y_: mnist.test.labels
-      }))
+      })
+  print(f'training accuracy {accuracy_value:g}')
   print('Done training!')
 
   # Export model
diff --git a/tensorflow_serving/example/resnet_client.py b/tensorflow_serving/example/resnet_client.py
index b531166a91d..f04d5dda548 100644
--- a/tensorflow_serving/example/resnet_client.py
+++ b/tensorflow_serving/example/resnet_client.py
@@ -27,15 +27,14 @@
 resnet_client.py
 """
 
-from __future__ import print_function
 
 import base64
 import io
 import json
 
 import numpy as np
-from PIL import Image
 import requests
+from PIL import Image
 
 # The server URL specifies the endpoint of your server running the ResNet
 # model with the name "resnet" and using the predict interface.
@@ -57,7 +56,7 @@ def main():
   if MODEL_ACCEPT_JPG:
     # Compose a JSON Predict request (send JPEG image in base64).
     jpeg_bytes = base64.b64encode(dl_request.content).decode('utf-8')
-    predict_request = '{"instances" : [{"b64": "%s"}]}' % jpeg_bytes
+    predict_request = f'{{"instances" : [{{"b64": "{jpeg_bytes}"}}]}}'
   else:
     # Compose a JOSN Predict request (send the image tensor).
     jpeg_rgb = Image.open(io.BytesIO(dl_request.content))
@@ -79,8 +78,7 @@ def main():
     total_time += response.elapsed.total_seconds()
     prediction = response.json()['predictions'][0]
 
-  print('Prediction class: {}, avg latency: {} ms'.format(
-      np.argmax(prediction), (total_time * 1000) / num_requests))
+  print(f'Prediction class: {np.argmax(prediction)}, avg latency: {(total_time * 1000) / num_requests} ms')
 
 
 if __name__ == '__main__':
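For the base64 branch above, the full request/response cycle looks roughly like this — the server URL and image file are assumptions, while the JSON shape is exactly the one composed in resnet_client.py:

```python
# Hedged REST sketch for the "b64" instances format used above.
import base64

import requests

SERVER_URL = 'http://localhost:8501/v1/models/resnet:predict'  # assumed
with open('cat.jpg', 'rb') as f:  # any local JPEG
  jpeg_bytes = base64.b64encode(f.read()).decode('utf-8')
predict_request = f'{{"instances" : [{{"b64": "{jpeg_bytes}"}}]}}'
response = requests.post(SERVER_URL, data=predict_request)
response.raise_for_status()
print(response.json()['predictions'][0])
```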
diff --git a/tensorflow_serving/example/resnet_client_grpc.py b/tensorflow_serving/example/resnet_client_grpc.py
index 96e22ee8d2c..0d721815260 100644
--- a/tensorflow_serving/example/resnet_client_grpc.py
+++ b/tensorflow_serving/example/resnet_client_grpc.py
@@ -12,22 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Send JPEG image to tensorflow_model_server loaded with ResNet model.
+"""Send JPEG image to tensorflow_model_server loaded with ResNet model."""
 
-"""
-
-from __future__ import print_function
 
 import io
 
 import grpc
 import numpy as np
-from PIL import Image
 import requests
 import tensorflow as tf
+from PIL import Image
 
-from tensorflow_serving.apis import predict_pb2
-from tensorflow_serving.apis import prediction_service_pb2_grpc
+from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc
 
 # The image URL is the location of the image we should send to the server
 IMAGE_URL = 'https://tensorflow.org/images/blogs/serving/cat.jpg'
@@ -71,7 +67,7 @@ def main(_):
                   tf.make_tensor_proto(data))
   result = stub.Predict(request, 10.0)  # 10 secs timeout
   result = result.outputs['activation_49'].float_val
-  print('Prediction class: {}'.format(np.argmax(result)))
+  print(f'Prediction class: {np.argmax(result)}')
 
 
 if __name__ == '__main__':
diff --git a/tensorflow_serving/example/resnet_warmup.py b/tensorflow_serving/example/resnet_warmup.py
index c7a6af151fb..082958e3890 100644
--- a/tensorflow_serving/example/resnet_warmup.py
+++ b/tensorflow_serving/example/resnet_warmup.py
@@ -14,36 +14,34 @@
 # ==============================================================================
 """Creates the tf_serving_warmup_requests file to warm up a ResNet SavedModel.
 
-  1. Invoke this script passing in the saved_model directory (including version
-  folder, the folder containing saved_model.pb) as an argument.
-  2. Restart tensorflow_model_server.
+1. Invoke this script passing in the saved_model directory (including version
+folder, the folder containing saved_model.pb) as an argument.
+2. Restart tensorflow_model_server.
 
-  If unsure of the model directory, look for the output:
-  'No warmup data file found at' in the tensorflow_model_server
-  startup log
+If unsure of the model directory, look for the output:
+'No warmup data file found at' in the tensorflow_model_server
+startup log
 
-  After the script is run, and tensorflow_model_server is restarted, to verify
-  it is working look for the output:
-  'Starting to read warmup data for model at' and 'Finished reading warmup data
-  for model at' in the tensorflow_model_server startup log
+After the script is run, and tensorflow_model_server is restarted, to verify
+it is working look for the output:
+'Starting to read warmup data for model at' and 'Finished reading warmup data
+for model at' in the tensorflow_model_server startup log
 
-  Usage example:
-  python resnet_warmup.py saved_model_dir
+Usage example:
+python resnet_warmup.py saved_model_dir
 """
 
-from __future__ import print_function
 
 import io
 import os
 import sys
 
 import numpy as np
-from PIL import Image
 import requests
 import tensorflow as tf
-
-from tensorflow_serving.apis import predict_pb2
-from tensorflow_serving.apis import prediction_log_pb2
+from PIL import Image
+from tensorflow_serving.apis import predict_pb2, prediction_log_pb2
 
 # IMAGE_URLS are the locations of the images we use to warmup the model
 IMAGE_URLS = ['https://tensorflow.org/images/blogs/serving/cat.jpg',
@@ -64,8 +62,8 @@ def main():
   model_dir = sys.argv[-1]
   if not os.path.isdir(model_dir):
-    print('The saved model directory: %s does not exist. '
-          'Specify the path of an existing model.' % model_dir)
+    print(f'The saved model directory: {model_dir} does not exist. '
+          'Specify the path of an existing model.')
     sys.exit(-1)
 
   # Create the assets.extra directory, assuming model_dir is the versioned
@@ -100,8 +98,8 @@
           predict_log=prediction_log_pb2.PredictLog(request=request))
       writer.write(log.SerializeToString())
 
-  print('Created the file \'%s\', restart tensorflow_model_server to warmup '
-        'the ResNet SavedModel.' % warmup_file)
+  print(f'Created the file \'{warmup_file}\', restart tensorflow_model_server to warmup '
+        'the ResNet SavedModel.')
 
 
 if __name__ == '__main__':
   main()
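Condensed from the warmup logic above, the core of the warmup-file format is PredictionLog records in a TFRecord file. The model name and input tensor below are placeholders, not values from this change:

```python
# Write one warmup record; the file is expected to land in
# YourSavedModel/assets.extra/tf_serving_warmup_requests.
import tensorflow as tf

from tensorflow_serving.apis import predict_pb2, prediction_log_pb2

request = predict_pb2.PredictRequest()
request.model_spec.name = 'resnet'  # assumed served-model name
request.inputs['input'].CopyFrom(tf.make_tensor_proto([1.0]))  # placeholder
with tf.io.TFRecordWriter('tf_serving_warmup_requests') as writer:
  log = prediction_log_pb2.PredictionLog(
      predict_log=prediction_log_pb2.PredictLog(request=request))
  writer.write(log.SerializeToString())
```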
diff --git a/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py b/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py
index 07fe685d139..936c14311b9 100644
--- a/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py
+++ b/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py
@@ -29,7 +29,9 @@
 
 import tensorflow.compat.v1 as tf
 
-from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops import remote_predict_ops
+from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops import (
+    remote_predict_ops,
+)
 
 tf.app.flags.DEFINE_string("output_dir", "/tmp/half_plus_two_with_rpop/1/",
                            "Savedmodel export path")
@@ -127,12 +129,8 @@ def main(_):
                FLAGS.target_address, FLAGS.remote_model_name)
   print(
-      "SavedModel generated at: %(dir)s with target_address: %(target_address)s"
-      ", remote_model_name: %(remote_model_name)s. " % {
-          "dir": FLAGS.output_dir,
-          "target_address": FLAGS.target_address,
-          "remote_model_name": FLAGS.remote_model_name
-      })
+      f"SavedModel generated at: {FLAGS.output_dir} with target_address: {FLAGS.target_address}"
+      f", remote_model_name: {FLAGS.remote_model_name}. ")
 
 
 if __name__ == "__main__":
diff --git a/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py b/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py
index 261c8a029dc..8e7800ad1dc 100644
--- a/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py
+++ b/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py
@@ -36,13 +36,11 @@
 tensorflow_model_server --port=8500 --model_config_file=/tmp/config_file.txt
 """
 
-from __future__ import print_function
 
 import grpc
 import tensorflow.compat.v1 as tf
 
-from tensorflow_serving.apis import predict_pb2
-from tensorflow_serving.apis import prediction_service_pb2_grpc
+from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc
 
 tf.app.flags.DEFINE_string('server', 'localhost:8500',
                            'PredictionService host:port')
diff --git a/tensorflow_serving/experimental/example/remote_predict_client.py b/tensorflow_serving/experimental/example/remote_predict_client.py
index ccf98a12c2c..7c06b57650a 100644
--- a/tensorflow_serving/experimental/example/remote_predict_client.py
+++ b/tensorflow_serving/experimental/example/remote_predict_client.py
@@ -17,11 +17,12 @@
 Example client code which calls the Remote Predict Op directly.
 """
 
-from __future__ import print_function
 
 import tensorflow.compat.v1 as tf
 
-from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops import remote_predict_ops
+from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops import (
+    remote_predict_ops,
+)
 
 tf.app.flags.DEFINE_string("input_tensor_aliases", "x",
                            "Aliases of input tensors")
@@ -40,7 +41,7 @@
 FLAGS = tf.app.flags.FLAGS
 
 
-def main(unused_argv):
+def main(unused_argv):  # noqa: ARG001
   print("Call remote_predict_op")
   results = remote_predict_ops.run(
       [FLAGS.input_tensor_aliases],
diff --git a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py
index c1597decea2..11a4bde193b 100644
--- a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py
+++ b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py
@@ -16,10 +16,11 @@
 @@run
 """
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+
 from tensorflow.python.util.all_util import remove_undocumented
-from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops.remote_predict_ops import run
+
+from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops.remote_predict_ops import (
+    run,
+)
 
 remove_undocumented(__name__)
diff --git a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/ops/remote_predict_op.cc b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/ops/remote_predict_op.cc
index def975ac60d..0710192f3e7 100644
--- a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/ops/remote_predict_op.cc
+++ b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/ops/remote_predict_op.cc
@@ -87,7 +87,7 @@ fail_op_on_rpc_error: If set true, the Op fails if the rpc fails, and returns
   Set true by default.
 max_rpc_deadline_millis: The rpc deadline for remote predict. The actual
   deadline is min(incoming_rpc_deadline, max_rpc_deadline_millis).
-signature_name: the signature def for remote graph inference, defaulting to 
+signature_name: the signature def for remote graph inference, defaulting to
   "serving_default".
 target_address: Address of the server hosting the remote graph.
 model_name: Model name of the remote TF graph.
@@ -102,7 +102,7 @@ output_tensor_aliases: Tensor of strings for the output tensor alias names to
 status_code: Returns the status code of the rpc call; basically converting
   tensorflow::error::Code to it's int value, so 0 means OK.
 status_error_message: Returns the error message in the rpc status.
-output_tensors: Tensors returned by the Predict call on the remote graph, which 
+output_tensors: Tensors returned by the Predict call on the remote graph, which
   are in the same order as output_tensor_aliases.
 output_types: A list of types of the output tensors. Length of this list should
   be equal to the length of 'output_tensor_aliases'.
diff --git a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py
index b2854503459..5c613455c8d 100644
--- a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py
+++ b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py
@@ -14,16 +14,18 @@
 # ==============================================================================
 """Operations for RemotePredict."""
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
 
 import os.path
+
 import tensorflow.compat.v1 as tf
 
-from tensorflow_serving.experimental.tensorflow.ops.remote_predict.ops import gen_remote_predict_op
+from tensorflow_serving.experimental.tensorflow.ops.remote_predict.ops import (
+    gen_remote_predict_op,
+)
+
 # pylint: disable=wildcard-import
-from tensorflow_serving.experimental.tensorflow.ops.remote_predict.ops.gen_remote_predict_op import *
+from tensorflow_serving.experimental.tensorflow.ops.remote_predict.ops.gen_remote_predict_op import *  # noqa: F403
+
 # pylint: enable=wildcard-import
 
 _remote_predict_op_module = tf.load_op_library(
diff --git a/tensorflow_serving/g3doc/saved_model_warmup.md b/tensorflow_serving/g3doc/saved_model_warmup.md
index 1d4345ae758..838d0966ff8 100644
--- a/tensorflow_serving/g3doc/saved_model_warmup.md
+++ b/tensorflow_serving/g3doc/saved_model_warmup.md
@@ -48,5 +48,3 @@ Warmup data can be added in two ways:
     `YourSavedModel/assets.extra/tf_serving_warmup_requests` based on the
     validation requests provided via
     [RequestSpec](https://www.tensorflow.org/tfx/guide/infra_validator#requestspec).
-
-
diff --git a/tensorflow_serving/model_servers/profiler_client.py b/tensorflow_serving/model_servers/profiler_client.py
index c4573842980..65bb98f361c 100644
--- a/tensorflow_serving/model_servers/profiler_client.py
+++ b/tensorflow_serving/model_servers/profiler_client.py
@@ -14,12 +14,8 @@
 # ==============================================================================
 """Simple client to send profiling request to ModelServer."""
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
 
 import tensorflow as tf
-
 from tensorflow.python.profiler import profiler_client
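profiler_client is the request side of the tracing test modified further below; a minimal, hedged sketch of asking a running server for a trace (the address, logdir, and duration are assumptions, and the call assumes a TF 2.x `tensorflow.python.profiler.profiler_client`):

```python
# Request a 2-second trace from a model server's profiler service and
# write the result to a TensorBoard logdir; values are illustrative.
from tensorflow.python.profiler import profiler_client

profiler_client.trace('localhost:8500', '/tmp/tb_logdir', duration_ms=2000)
```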
""" - with open(self._GetGoodModelConfigTemplate(), 'r') as template_file: + with open(self._GetGoodModelConfigTemplate()) as template_file: config = template_file.read().replace('${TEST_HALF_PLUS_TWO_DIR}', self._GetSavedModelBundlePath()) config = config.replace('${TEST_HALF_PLUS_THREE_DIR}', @@ -314,7 +311,6 @@ def testBadModelConfig(self): def testModelConfigReload(self): """Test model server polls filesystem for model configuration.""" - base_config_proto = """ model_config_list: {{ config: {{ @@ -368,7 +364,6 @@ def testModelConfigReload(self): def testModelConfigReloadWithZeroPollPeriod(self): """Test model server does not poll filesystem for model config.""" - base_config_proto = """ model_config_list: {{ config: {{ @@ -442,7 +437,7 @@ def testClassifyREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default:classify'.format(host, port) + url = f'http://{host}:{port}/v1/models/default:classify' json_req = {'signature_name': 'classify_x_to_y', 'examples': [{'x': 2.0}]} # Send request @@ -450,7 +445,7 @@ def testClassifyREST(self): try: resp_data = tensorflow_model_server_test_base.CallREST(url, json_req) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') # Verify response self.assertEqual(json.loads(resp_data.decode()), {'results': [[['', 3.0]]]}) @@ -462,7 +457,7 @@ def testRegressREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default:regress'.format(host, port) + url = f'http://{host}:{port}/v1/models/default:regress' json_req = {'signature_name': 'regress_x_to_y', 'examples': [{'x': 2.0}]} # Send request @@ -470,7 +465,7 @@ def testRegressREST(self): try: resp_data = tensorflow_model_server_test_base.CallREST(url, json_req) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') # Verify response self.assertEqual(json.loads(resp_data.decode()), {'results': [3.0]}) @@ -482,7 +477,7 @@ def testPredictREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default:predict'.format(host, port) + url = f'http://{host}:{port}/v1/models/default:predict' json_req = {'instances': [2.0, 3.0, 4.0]} # Send request @@ -490,7 +485,7 @@ def testPredictREST(self): try: resp_data = tensorflow_model_server_test_base.CallREST(url, json_req) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') # Verify response self.assertEqual( @@ -503,7 +498,7 @@ def testPredictColumnarREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default:predict'.format(host, port) + url = f'http://{host}:{port}/v1/models/default:predict' json_req = {'inputs': [2.0, 3.0, 4.0]} # Send request @@ -511,7 +506,7 @@ def testPredictColumnarREST(self): try: resp_data = tensorflow_model_server_test_base.CallREST(url, json_req) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') # Verify response self.assertEqual( @@ -524,14 +519,14 @@ def testGetStatusREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default'.format(host, port) + url = f'http://{host}:{port}/v1/models/default' # Send request resp_data = None try: resp_data = 
tensorflow_model_server_test_base.CallREST(url, None) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') # Verify response self.assertEqual( @@ -553,14 +548,14 @@ def testGetModelMetadataREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default/metadata'.format(host, port) + url = f'http://{host}:{port}/v1/models/default/metadata' # Send request resp_data = None try: resp_data = tensorflow_model_server_test_base.CallREST(url, None) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') try: model_metadata_file = self._GetModelMetadataFile() @@ -576,7 +571,7 @@ def testGetModelMetadataREST(self): json.loads(resp_data.decode())), tensorflow_model_server_test_base.SortedObject(expected_metadata)) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') def testPrometheusEndpoint(self): """Test ModelStatus implementation over REST API with columnar inputs.""" @@ -587,14 +582,14 @@ def testPrometheusEndpoint(self): monitoring_config_file=self._GetMonitoringConfigFile())[2].split(':') # Prepare request - url = 'http://{}:{}/monitoring/prometheus/metrics'.format(host, port) + url = f'http://{host}:{port}/monitoring/prometheus/metrics' # Send request resp_data = None try: resp_data = tensorflow_model_server_test_base.CallREST(url, None) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail(f'Request failed with error: {e}') # Verify that there should be some metric type information. 
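The REST conversions above all target the same URL shapes; a hedged, self-contained sketch of one such call (host, port, and the expected half-plus-two output are assumptions based on the tests):

```python
# Hypothetical REST predict call mirroring testPredictREST above; assumes
# a model server started with --rest_api_port=8501 serving the
# half-plus-two test model under the name "default".
import json
from urllib import request

host, port = 'localhost', 8501
url = f'http://{host}:{port}/v1/models/default:predict'
data = json.dumps({'instances': [2.0, 3.0, 4.0]}).encode('utf-8')
with request.urlopen(request.Request(url, data=data)) as resp:
  print(json.loads(resp.read().decode()))  # expect {'predictions': [3.0, 3.5, 4.0]}
```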
diff --git a/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py b/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py
index 11274737ec7..e71a22fdce2 100644
--- a/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py
+++ b/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py
@@ -14,18 +14,13 @@
 # ==============================================================================
 """Manual test client for tensorflow_model_server."""
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
 
 import grpc
 import tensorflow as tf
-
 from tensorflow.core.framework import types_pb2
 from tensorflow.python.platform import flags
-from tensorflow_serving.apis import predict_pb2
-from tensorflow_serving.apis import prediction_service_pb2_grpc
+
+from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc
 
 tf.compat.v1.app.flags.DEFINE_string('server', 'localhost:8500',
                                      'inception_inference service host:port')
diff --git a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py
index 1a0b250a12d..b97ce643456 100644
--- a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py
+++ b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py
@@ -15,9 +15,6 @@
 
 """Tests for tensorflow_model_server."""
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
 
 import atexit
 import json
@@ -28,15 +25,13 @@
 import time
 
 import grpc
-from six.moves import range
-from six.moves import urllib
 import tensorflow as tf
-
+from six.moves import urllib
 from tensorflow.core.framework import types_pb2
 from tensorflow.python.platform import flags
 from tensorflow.python.saved_model import signature_constants
-from tensorflow_serving.apis import predict_pb2
-from tensorflow_serving.apis import prediction_service_pb2_grpc
+
+from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc
 
 FLAGS = flags.FLAGS
@@ -50,8 +45,7 @@ def SetVirtualCpus(num_virtual_cpus):
   """Create virtual CPU devices if they haven't yet been created."""
   if num_virtual_cpus < 1:
-    raise ValueError('`num_virtual_cpus` must be at least 1 not %r' %
-                     (num_virtual_cpus,))
+    raise ValueError(f'`num_virtual_cpus` must be at least 1 not {num_virtual_cpus!r}')
   physical_devices = tf.config.experimental.list_physical_devices('CPU')
   if not physical_devices:
     raise RuntimeError('No CPUs found')
@@ -64,8 +58,9 @@ def SetVirtualCpus(num_virtual_cpus):
         physical_devices[0], virtual_devices)
   else:
     if len(configs) < num_virtual_cpus:
-      raise RuntimeError('Already configured with %d < %d virtual CPUs' %
-                         (len(configs), num_virtual_cpus))
+      raise RuntimeError(
+          f'Already configured with {len(configs)} < {num_virtual_cpus} virtual CPUs'
+      )
 
 
 def PickUnusedPort():
@@ -85,7 +80,7 @@ def WaitForServerReady(port):
 
     try:
       # Send empty request to missing model
-      channel = grpc.insecure_channel('localhost:{}'.format(port))
+      channel = grpc.insecure_channel(f'localhost:{port}')
       stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
       stub.Predict(request, RPC_TIMEOUT)
     except grpc.RpcError as error:
@@ -97,18 +92,17 @@ def CallREST(url, req, max_attempts=60):
   """Returns HTTP response body from a REST API call."""
-  for attempt in range(max_attempts):
+  for attempt in range(max_attempts):  # noqa: RET503
     try:
-      print('Attempt {}: Sending request to {} with data:\n{}'.format(
-          attempt, url, req))
+      print(f'Attempt {attempt}: Sending request to {url} with data:\n{req}')
       json_data = json.dumps(req).encode('utf-8') if req is not None else None
       resp = urllib.request.urlopen(urllib.request.Request(url, data=json_data))
       resp_data = resp.read()
-      print('Received response:\n{}'.format(resp_data))
+      print(f'Received response:\n{resp_data}')
       resp.close()
       return resp_data
     except Exception as e:  # pylint: disable=broad-except
-      print('Failed attempt {}. Error: {}'.format(attempt, e))
+      print(f'Failed attempt {attempt}. Error: {e}')
       if attempt == max_attempts - 1:
         raise
       print('Retrying...')
@@ -123,8 +117,7 @@ def SortedObject(obj):
     return sorted(SortedObject(x) for x in obj)
   if isinstance(obj, tuple):
     return list(sorted(SortedObject(x) for x in obj))
-  else:
-    return obj
+  return obj
 
 
 def GetArgsKey(*args, **kwargs):
@@ -188,9 +181,8 @@ def RunServer(
       return TensorflowModelServerTestBase.model_servers_dict[args_key]
     port = PickUnusedPort()
     rest_api_port = PickUnusedPort()
-    print(('Starting test server on port: {} for model_name: '
-           '{}/model_config_file: {}'.format(port, model_name,
-                                             model_config_file)))
+    print(f'Starting test server on port: {port} for model_name: '
+          f'{model_name}/model_config_file: {model_config_file}')
 
     command = os.path.join(
         TensorflowModelServerTestBase.TestSrcDirPath(model_server_path),
diff --git a/tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt b/tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt
index 6649d3fd44a..887d07c4356 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt
@@ -1 +1 @@
-improperly formatted file
\ No newline at end of file
+improperly formatted file
diff --git a/tensorflow_serving/servables/tensorflow/testdata/export_bad_half_plus_two.py b/tensorflow_serving/servables/tensorflow/testdata/export_bad_half_plus_two.py
index b5b7bde74c7..5ba302957b9 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/export_bad_half_plus_two.py
+++ b/tensorflow_serving/servables/tensorflow/testdata/export_bad_half_plus_two.py
@@ -34,7 +34,7 @@ def Export():
     # Calculate, y = a*x + b
     # here we use a placeholder 'x' which is fed at inference time.
     x = tf.placeholder(tf.float32)
-    y = tf.add(tf.multiply(a, x), b)
+    y = tf.add(tf.multiply(a, x), b)  # noqa: F841
 
     # Export the model without signatures.
     # Note that the model is intentionally exported without using exporter,
diff --git a/tensorflow_serving/servables/tensorflow/testdata/export_counter.py b/tensorflow_serving/servables/tensorflow/testdata/export_counter.py
index c4ada219c92..e25526701a2 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/export_counter.py
+++ b/tensorflow_serving/servables/tensorflow/testdata/export_counter.py
@@ -18,9 +18,6 @@
 reset_counter, to test Predict service.
 """
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
 
 import tensorflow as tf
 
@@ -97,7 +94,7 @@ def export_model(output_dir):
   save_model(sess, signature_def_map, output_dir)
 
 
-def main(unused_argv):
+def main(unused_argv):  # noqa: ARG001
   export_model("/tmp/saved_model_counter/00000123")
diff --git a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.README b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.README
index d3fcb3d8d85..1b6a59f2654 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.README
+++ b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.README
@@ -19,4 +19,3 @@ and is updated using
 bazel run -c opt parse_example_tflite_with_string
 cp /tmp/parse_example_tflite parse_example_tflite/00000123/model.tflite
 ```
-
diff --git a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py
index 17790e90b2d..86397ada26d 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py
+++ b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py
@@ -34,7 +34,6 @@
 import sys
 
 import tensorflow.compat.v1 as tf
-
 from tensorflow.lite.tools.signature import signature_def_utils
 
 FLAGS = None
@@ -87,14 +86,13 @@ def _generate_tflite_for_parse_example_with_string(export_dir):
   k = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
   tflite_model = signature_def_utils.set_signature_defs(
       tflite_model, {k: predict_signature_def})
-  open(export_dir + "/model.tflite", "wb").write(tflite_model)
+  with open(export_dir + "/model.tflite", "wb") as fp:
+    fp.write(tflite_model)
 
 
 def main(_):
   _generate_tflite_for_parse_example_with_string(FLAGS.output_dir)
-  print("TFLite model generated at: %(dir)s" % {
-      "dir": FLAGS.output_dir
-  })
+  print(f"TFLite model generated at: {FLAGS.output_dir}")
 
 
 if __name__ == "__main__":
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py
index fe05c062cf0..36755af3082 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py
@@ -45,16 +45,12 @@
     --device=gpu
 """
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
 
 import argparse
 import os
 import sys
 
 import tensorflow.compat.v1 as tf
-
 from tensorflow.lite.tools.signature import signature_def_utils
 from tensorflow.python.lib.io import file_io
 
@@ -199,7 +195,8 @@ def get_serving_signatures(self):
     }
 
   @tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.float32)])
-  def predict(self, x=tf.constant([0], shape=[1], dtype=tf.float32)):
+  def predict(self, x=None):
+    x = tf.constant([0], shape=[1], dtype=tf.float32) if x is None else x
     return {"y": self.compute(x, self.b)}
 
   @tf.function(
@@ -386,7 +383,8 @@ def _generate_saved_model_for_half_plus_two(
     k = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
     tflite_model = signature_def_utils.set_signature_defs(
         tflite_model, {k: predict_signature_def})
-    open(export_dir + "/model.tflite", "wb").write(tflite_model)
+    with open(export_dir + "/model.tflite", "wb") as fp:
+      fp.write(tflite_model)
   else:
     if use_main_op:
       builder.add_meta_graph_and_variables(
@@ -412,57 +410,39 @@ def _generate_saved_model_for_half_plus_two(
 def main(_):
   _generate_saved_model_for_half_plus_two(
       FLAGS.output_dir, device_type=FLAGS.device)
-  print("SavedModel generated for %(device)s at: %(dir)s" % {
-      "device": FLAGS.device,
-      "dir": FLAGS.output_dir
-  })
+  print(f"SavedModel generated for {FLAGS.device} at: {FLAGS.output_dir}")
 
   _generate_saved_model_for_half_plus_two(
-      "%s_%s" % (FLAGS.output_dir_tf2, FLAGS.device),
+      f"{FLAGS.output_dir_tf2}_{FLAGS.device}",
       tf2=True,
       device_type=FLAGS.device)
   print(
-      "SavedModel TF2 generated for %(device)s at: %(dir)s" % {
-          "device": FLAGS.device,
-          "dir": "%s_%s" % (FLAGS.output_dir_tf2, FLAGS.device),
-      })
+      "SavedModel TF2 generated for {device} at: {dir}".format(
+          device=FLAGS.device,
+          dir=f"{FLAGS.output_dir_tf2}_{FLAGS.device}",
+      ))
 
   _generate_saved_model_for_half_plus_two(
       FLAGS.output_dir_pbtxt, as_text=True, device_type=FLAGS.device)
-  print("SavedModel generated for %(device)s at: %(dir)s" % {
-      "device": FLAGS.device,
-      "dir": FLAGS.output_dir_pbtxt
-  })
+  print(f"SavedModel generated for {FLAGS.device} at: {FLAGS.output_dir_pbtxt}")
 
   _generate_saved_model_for_half_plus_two(
       FLAGS.output_dir_main_op, use_main_op=True, device_type=FLAGS.device)
-  print("SavedModel generated for %(device)s at: %(dir)s " % {
-      "device": FLAGS.device,
-      "dir": FLAGS.output_dir_main_op
-  })
+  print(f"SavedModel generated for {FLAGS.device} at: {FLAGS.output_dir_main_op} ")
 
   _generate_saved_model_for_half_plus_two(
       FLAGS.output_dir_tflite, as_tflite=True, device_type=FLAGS.device)
-  print("SavedModel in TFLite format generated for %(device)s at: %(dir)s " % {
-      "device": FLAGS.device,
-      "dir": FLAGS.output_dir_tflite,
-  })
+  print(f"SavedModel in TFLite format generated for {FLAGS.device} at: {FLAGS.output_dir_tflite} ")
 
   _generate_saved_model_for_half_plus_two(
       FLAGS.output_dir_mlmd, include_mlmd=True, device_type=FLAGS.device)
-  print("SavedModel with MLMD generated for %(device)s at: %(dir)s " % {
-      "device": FLAGS.device,
-      "dir": FLAGS.output_dir_mlmd,
-  })
+  print(f"SavedModel with MLMD generated for {FLAGS.device} at: {FLAGS.output_dir_mlmd} ")
 
   _generate_saved_model_for_half_plus_two(
       FLAGS.output_dir_tflite_with_sigdef,
       device_type=FLAGS.device,
       as_tflite_with_sigdef=True)
   print("SavedModel in TFLite format with SignatureDef generated for "
-        "%(device)s at: %(dir)s " % {
-            "device": FLAGS.device,
-            "dir": FLAGS.output_dir_tflite_with_sigdef,
-        })
+        f"{FLAGS.device} at: {FLAGS.output_dir_tflite_with_sigdef} ")
 
 
 if __name__ == "__main__":
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets.extra/mlmd_uuid b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets.extra/mlmd_uuid
index afc2417480e..cffaea71d35 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets.extra/mlmd_uuid
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets.extra/mlmd_uuid
@@ -1 +1 @@
-test_mlmd_uuid
\ No newline at end of file
+test_mlmd_uuid
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/tf_text_regression.README b/tensorflow_serving/servables/tensorflow/testdata/tf_text_regression.README
index faf81a58f1a..038352e0c66 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/tf_text_regression.README
+++ b/tensorflow_serving/servables/tensorflow/testdata/tf_text_regression.README
@@ -16,4 +16,3 @@ This model is used to test the integration with TF Text, and is updated
 using this script:
 
 https://github.com/tensorflow/text/blob/master/oss_scripts/model_server/save_models.py
-
diff --git a/tensorflow_serving/tools/docker/Dockerfile.devel-gpu b/tensorflow_serving/tools/docker/Dockerfile.devel-gpu
index d33bca9642b..862ec1cd0a6 100644
--- a/tensorflow_serving/tools/docker/Dockerfile.devel-gpu
+++ b/tensorflow_serving/tools/docker/Dockerfile.devel-gpu
@@ -187,4 +187,3 @@ FROM binary_build as clean_build
 RUN bazel clean --expunge --color=yes && \
     rm -rf /root/.cache
 CMD ["/bin/bash"]
-
diff --git a/tensorflow_serving/tools/docker/Dockerfile.gpu b/tensorflow_serving/tools/docker/Dockerfile.gpu
index 80b210d5058..fe92f8ab636 100644
--- a/tensorflow_serving/tools/docker/Dockerfile.gpu
+++ b/tensorflow_serving/tools/docker/Dockerfile.gpu
@@ -87,4 +87,3 @@ tensorflow_model_server --port=8500 --rest_api_port=8501 \
 && chmod +x /usr/bin/tf_serving_entrypoint.sh
 
 ENTRYPOINT ["/usr/bin/tf_serving_entrypoint.sh"]
-
diff --git a/tensorflow_serving/tools/pip_package/setup.py b/tensorflow_serving/tools/pip_package/setup.py
index d7b21224ae7..820c9199cc4 100644
--- a/tensorflow_serving/tools/pip_package/setup.py
+++ b/tensorflow_serving/tools/pip_package/setup.py
@@ -25,8 +25,7 @@
 
 import sys
 
-from setuptools import find_packages
-from setuptools import setup
+from setuptools import find_packages, setup
 
 DOCLINES = __doc__.split('\n')
diff --git a/tensorflow_serving/util/net_http/client/test_client/public/httpclient.h b/tensorflow_serving/util/net_http/client/test_client/public/httpclient.h
index 52b624d9c07..14cb4e51cff 100644
--- a/tensorflow_serving/util/net_http/client/test_client/public/httpclient.h
+++ b/tensorflow_serving/util/net_http/client/test_client/public/httpclient.h
@@ -46,4 +46,3 @@ inline std::unique_ptr<EvHTTPConnection> CreateEvHTTPConnection(
 }  // namespace tensorflow
 
 #endif  // THIRD_PARTY_TENSORFLOW_SERVING_UTIL_NET_HTTP_CLIENT_TEST_CLIENT_PUBLIC_HTTPCLIENT_H_
-
diff --git a/tools/gen_status_stamp.sh b/tools/gen_status_stamp.sh
index f8c840d6d8d..32a73498caa 100755
--- a/tools/gen_status_stamp.sh
+++ b/tools/gen_status_stamp.sh
@@ -35,5 +35,3 @@ if [ -d .git ] || git rev-parse --git-dir > /dev/null 2>&1; then
 else
   echo "BUILD_SCM_REVISION no_git"
 fi;
-
-
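One idiom from the test changes above worth calling out: testPredictUDS now builds its gRPC target with f'unix:{GRPC_SOCKET_PATH}'. A minimal sketch of that connection style — the socket path is an assumption standing in for the test's constant:

```python
# Connect to a model server over a Unix domain socket, as exercised by
# testPredictUDS in tensorflow_model_server_test.py above.
import grpc

from tensorflow_serving.apis import prediction_service_pb2_grpc

GRPC_SOCKET_PATH = '/tmp/tf_serving_socket'  # assumed path
channel = grpc.insecure_channel(f'unix:{GRPC_SOCKET_PATH}')
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
```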