import numpy as np
import cv2

import grpc

from tritonclient.grpc import service_pb2, service_pb2_grpc
import tritonclient.grpc.model_config_pb2 as mc


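# fixed random palette: one color per class id, used to colorize the predicted label map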
np.random.seed(123)
palette = np.random.randint(0, 256, (100, 3), dtype=np.uint8)  # uint8 so cv2.imwrite accepts the result


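# connection details and model i/o settings; the tensor names, dtypes and shapes
# must match the model configuration deployed on the Triton server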
# url = '10.128.61.7:8001'
url = '127.0.0.1:8001'
model_name = 'bisenetv2'
model_version = '1'
inp_name = 'input_image'
outp_name = 'preds'
inp_dtype = 'FP32'
outp_dtype = np.int64
inp_shape = [1, 3, 1024, 2048]
outp_shape = [1024, 2048]
impth = '../example.png'
mean = [0.3257, 0.3690, 0.3223]  # cityscapes, rgb
std = [0.2112, 0.2148, 0.2115]


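# open an insecure gRPC channel to the Triton server, raising the message size
# limits so that a full-resolution image and its prediction fit in single messages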
option = [
    ('grpc.max_receive_message_length', 1073741824),
    ('grpc.max_send_message_length', 1073741824),
]
channel = grpc.insecure_channel(url, options=option)
grpc_stub = service_pb2_grpc.GRPCInferenceServiceStub(channel)


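# query and print the model's metadata and config as a sanity check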
metadata_request = service_pb2.ModelMetadataRequest(
    name=model_name, version=model_version)
metadata_response = grpc_stub.ModelMetadata(metadata_request)
print(metadata_response)

config_request = service_pb2.ModelConfigRequest(
    name=model_name,
    version=model_version)
config_response = grpc_stub.ModelConfig(config_request)
print(config_response)


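# build the inference request and describe its single input tensor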
request = service_pb2.ModelInferRequest()
request.model_name = model_name
request.model_version = model_version

inp = service_pb2.ModelInferRequest().InferInputTensor()
inp.name = inp_name
inp.datatype = inp_dtype
inp.shape.extend(inp_shape)


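# preprocess: read the image as rgb, resize to the model input size, normalize
# with the training mean/std, and serialize it as NCHW float32 bytes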
mean = np.array(mean).reshape(1, 1, 3)
std = np.array(std).reshape(1, 1, 3)
im = cv2.imread(impth)[:, :, ::-1]  # bgr -> rgb
im = cv2.resize(im, dsize=tuple(inp_shape[-1:-3:-1]))  # dsize is (width, height) = (2048, 1024)
im = ((im / 255.) - mean) / std
im = im[None, ...].transpose(0, 3, 1, 2)  # hwc -> nchw
inp_bytes = im.astype(np.float32).tobytes()

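# attach the input description and the raw tensor bytes to the request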
request.ClearField("inputs")
request.ClearField("raw_input_contents")
request.inputs.extend([inp,])
request.raw_input_contents.extend([inp_bytes,])


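# request the named output tensor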
outp = service_pb2.ModelInferRequest().InferRequestedOutputTensor()
outp.name = outp_name
request.outputs.extend([outp,])

# sync (blocking) call:
# resp = grpc_stub.ModelInfer(request).raw_output_contents[0]
# async call: .future() returns immediately, result() waits for the reply
resp = grpc_stub.ModelInfer.future(request)
resp = resp.result().raw_output_contents[0]

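# decode the raw int64 prediction into a class-id map, then colorize and save it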
out = np.frombuffer(resp, dtype=outp_dtype).reshape(*outp_shape)

out = palette[out]
cv2.imwrite('res.png', out)