@@ -1,5 +1,5 @@
 # Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang)
-#               2024 Alibaba Inc (authors: Xiang Lyu)
+#               2024 Alibaba Inc (authors: Xiang Lyu, Zetao Hu)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import json
+import tensorrt as trt
 import torchaudio
 import logging
 logging.getLogger('matplotlib').setLevel(logging.WARNING)
@@ -45,3 +46,44 @@ def load_wav(wav, target_sr): |
     assert sample_rate > target_sr, 'wav sample rate {} must be greater than {}'.format(sample_rate, target_sr)
     speech = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_sr)(speech)
     return speech
+
+
+def convert_onnx_to_trt(trt_model, onnx_model, fp16):
+    _min_shape = [(2, 80, 4), (2, 1, 4), (2, 80, 4), (2,), (2, 80), (2, 80, 4)]
+    _opt_shape = [(2, 80, 193), (2, 1, 193), (2, 80, 193), (2,), (2, 80), (2, 80, 193)]
+    _max_shape = [(2, 80, 6800), (2, 1, 6800), (2, 80, 6800), (2,), (2, 80), (2, 80, 6800)]
+    input_names = ["x", "mask", "mu", "t", "spks", "cond"]
+
+    logging.info("Converting onnx to trt...")
+    network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
+    logger = trt.Logger(trt.Logger.INFO)
+    builder = trt.Builder(logger)
+    network = builder.create_network(network_flags)
+    parser = trt.OnnxParser(network, logger)
+    config = builder.create_builder_config()
+    config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 33)  # 8GB
+    if fp16:
+        config.set_flag(trt.BuilderFlag.FP16)
+    profile = builder.create_optimization_profile()
+    # load onnx model
+    with open(onnx_model, "rb") as f:
+        if not parser.parse(f.read()):
+            for error in range(parser.num_errors):
+                print(parser.get_error(error))
+            raise ValueError('failed to parse {}'.format(onnx_model))
+    # set input shapes
+    for i in range(len(input_names)):
+        profile.set_shape(input_names[i], _min_shape[i], _opt_shape[i], _max_shape[i])
+    tensor_dtype = trt.DataType.HALF if fp16 else trt.DataType.FLOAT
+    # set input and output data type
+    for i in range(network.num_inputs):
+        input_tensor = network.get_input(i)
+        input_tensor.dtype = tensor_dtype
+    for i in range(network.num_outputs):
+        output_tensor = network.get_output(i)
+        output_tensor.dtype = tensor_dtype
+    config.add_optimization_profile(profile)
+    engine_bytes = builder.build_serialized_network(network, config)
+    # save trt engine
+    with open(trt_model, "wb") as f:
+        f.write(engine_bytes)
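
For context, here is a minimal sketch of how an engine file written by `convert_onnx_to_trt` could be loaded back for inference. This is not part of the commit above; the file paths and the `load_trt_engine` helper are illustrative placeholders, and only standard TensorRT runtime calls are used.

```python
# Illustrative only: deserialize the engine produced by convert_onnx_to_trt
# and create an execution context. Paths below are placeholders.
import tensorrt as trt

trt_logger = trt.Logger(trt.Logger.INFO)

def load_trt_engine(trt_model):
    # read the serialized engine bytes saved by build_serialized_network
    with open(trt_model, "rb") as f:
        engine_bytes = f.read()
    runtime = trt.Runtime(trt_logger)
    return runtime.deserialize_cuda_engine(engine_bytes)

# e.g. after convert_onnx_to_trt('estimator.fp16.trt', 'estimator.fp32.onnx', fp16=True)
engine = load_trt_engine('estimator.fp16.trt')
context = engine.create_execution_context()
```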