|
| 1 | +#!/usr/bin/env python |
| 2 | + |
| 3 | +# The MIT License (MIT) |
| 4 | +# |
| 5 | +# Copyright (c) 2020 NVIDIA CORPORATION |
| 6 | +# |
| 7 | +# Permission is hereby granted, free of charge, to any person obtaining a copy of |
| 8 | +# this software and associated documentation files (the "Software"), to deal in |
| 9 | +# the Software without restriction, including without limitation the rights to |
| 10 | +# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of |
| 11 | +# the Software, and to permit persons to whom the Software is furnished to do so, |
| 12 | +# subject to the following conditions: |
| 13 | +# |
| 14 | +# The above copyright notice and this permission notice shall be included in all |
| 15 | +# copies or substantial portions of the Software. |
| 16 | +# |
| 17 | +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 18 | +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS |
| 19 | +# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR |
| 20 | +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER |
| 21 | +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| 22 | +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 23 | + |
| 24 | +import argparse, os, sys |
| 25 | +import numpy as np |
| 26 | +from numpy.random import randint |
| 27 | +import tritongrpcclient |
| 28 | +from PIL import Image |
| 29 | +import math |
| 30 | + |
# Fixed seed: makes the randomly generated batch sizes and payloads
# reproducible across runs of this test client.
np.random.seed(100019)
| 32 | + |
def parse_args():
    """Build the CLI parser for this test client and parse sys.argv."""
    flag_specs = [
        (('-v', '--verbose'),
         dict(action="store_true", required=False, default=False,
              help='Enable verbose output')),
        (('-u', '--url'),
         dict(type=str, required=False, default='localhost:8001',
              help='Inference server URL. Default is localhost:8001.')),
        (('--batch_size',),
         dict(type=int, required=False, default=4,
              help='Batch size')),
        (('--n_iter',),
         dict(type=int, required=False, default=-1,
              help='Number of iterations , with `batch_size` size')),
        (('--model_name',),
         dict(type=str, required=False, default="dali_identity_cpu",
              help='Model name')),
    ]
    parser = argparse.ArgumentParser()
    for names, options in flag_specs:
        parser.add_argument(*names, **options)
    return parser.parse_args()
| 46 | + |
| 47 | + |
def array_from_list(arrays):
    """
    Convert a list of 1D ndarrays to a single ndarray with ndims+=1.

    Shorter arrays are right-padded with zeros up to the length of the
    longest one, so all entries can be stacked along a new leading axis.

    :param arrays: non-empty list of 1D numpy arrays.
    :return: ndarray of shape ``(len(arrays), max_len)``.
    :raises ValueError: if ``arrays`` is empty.
    """
    if not arrays:
        # Fail with a clear message instead of the obscure error that
        # max()/np.stack would produce on an empty sequence.
        raise ValueError("Expected a non-empty list of arrays")
    # Comprehensions replace the original map/lambda chains; after padding
    # to max_len every array has the same shape, so no per-element check
    # is needed before stacking.
    max_len = max(arr.shape[0] for arr in arrays)
    padded = [np.pad(arr, (0, max_len - arr.shape[0])) for arr in arrays]
    return np.stack(padded)
| 58 | + |
| 59 | + |
def batcher(dataset, max_batch_size, n_iterations=-1):
    """
    Generator that splits ``dataset`` into consecutive batches of random size.

    :param dataset: indexable, sliceable dataset (list or ndarray).
    :param max_batch_size: upper bound for the random batch size.
                           NOTE(review): numpy's ``randint`` excludes the high
                           end, so batches never actually reach
                           ``max_batch_size`` — confirm this is intended.
    :param n_iterations: if positive, stop after yielding this many batches;
                         -1 (default) iterates until the dataset is exhausted.
    """
    iter_idx = 0
    data_idx = 0
    while data_idx < len(dataset):
        if 0 < n_iterations <= iter_idx:
            # PEP 479: `raise StopIteration` inside a generator becomes a
            # RuntimeError on Python 3.7+; a plain `return` ends iteration.
            return
        batch_size = min(randint(1, max_batch_size), len(dataset) - data_idx)
        iter_idx += 1
        yield dataset[data_idx : data_idx + batch_size]
        data_idx += batch_size
| 73 | + |
| 74 | + |
def main():
    """Round-trip random uint8 batches through a DALI identity model on a
    Triton server and verify the data comes back unchanged (same mean)."""
    FLAGS = parse_args()
    try:
        triton_client = tritongrpcclient.InferenceServerClient(url=FLAGS.url,
                                                               verbose=FLAGS.verbose)
    except Exception as e:
        print("channel creation failed: " + str(e))
        sys.exit(1)

    # All three checks must pass. The original used `or`, which reported an
    # error only when EVERY check failed, and printed the bound methods
    # themselves instead of calling them.
    server_live = triton_client.is_server_live()
    server_ready = triton_client.is_server_ready()
    model_ready = triton_client.is_model_ready(model_name=FLAGS.model_name)
    if not (server_live and server_ready and model_ready):
        print("Error connecting to server: Server live {}. Server ready {}. Model ready {}".format(
            server_live, server_ready, model_ready))
        sys.exit(1)

    model_name = FLAGS.model_name

    # Random ragged uint8 arrays; array_from_list zero-pads them to a common
    # length so they can be stacked into one batchable ndarray.
    input_data = [randint(0, 255, size=randint(100), dtype='uint8')
                  for _ in range(randint(100) * FLAGS.batch_size)]
    input_data = array_from_list(input_data)

    # Infer
    outputs = []
    input_name = "DALI_INPUT_0"
    output_name = "DALI_OUTPUT_0"
    input_shape = list(input_data.shape)
    outputs.append(tritongrpcclient.InferRequestedOutput(output_name))

    # NOTE(review): FLAGS.n_iter is parsed but never used here — consider
    # passing it as batcher's n_iterations once batcher's early-stop path is
    # confirmed safe on this Python version.
    for batch in batcher(input_data, FLAGS.batch_size):
        print("Input mean before backend processing:", np.mean(batch))
        # Only the leading (batch) dimension varies between iterations.
        input_shape[0] = np.shape(batch)[0]
        print("Batch size: ", input_shape[0])
        inputs = [tritongrpcclient.InferInput(input_name, input_shape, "UINT8")]
        # Initialize the data
        inputs[0].set_data_from_numpy(batch)

        # Test with outputs
        results = triton_client.infer(model_name=model_name,
                                      inputs=inputs,
                                      outputs=outputs)

        # Get the output arrays from the results; the identity model must
        # preserve the data, so pre/post means must agree.
        output0_data = results.as_numpy(output_name)
        print("Output mean after backend processing:", np.mean(output0_data))
        print("Output shape: ", np.shape(output0_data))
        if not math.isclose(np.mean(output0_data), np.mean(batch)):
            print("Pre/post average does not match")
            sys.exit(1)
        else:
            print("pass")

    statistics = triton_client.get_inference_statistics(model_name=model_name)
    if len(statistics.model_stats) != 1:
        print("FAILED: Inference Statistics")
        sys.exit(1)
| 132 | + |
| 133 | + |
# Script entry point: run the client only when executed directly.
if __name__ == '__main__':
    main()
0 commit comments