Skip to content

Commit 66483e0

Browse files
authored
replace trt_version in tensorrt/impls (PaddlePaddle#75826)
* update python/paddle/tensorrt/impls to support tensorrt 10
* fix
1 parent dd19cfb commit 66483e0

File tree

12 files changed

+90
-90
lines changed

12 files changed

+90
-90
lines changed

python/paddle/tensorrt/impls/activation.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -298,8 +298,8 @@ def hardswish_converter(network, paddle_op, inputs):
298298
return hardswish_layer.get_output(0)
299299

300300

301-
@converter_registry.register("pd_op.elu", trt_version="8.x")
302-
@converter_registry.register("pd_op.elu_", trt_version="8.x")
301+
@converter_registry.register("pd_op.elu")
302+
@converter_registry.register("pd_op.elu_")
303303
def elu_converter(network, paddle_op, inputs):
304304
x = inputs[0]
305305
alpha = paddle_op.attrs()["alpha"]
@@ -309,7 +309,7 @@ def elu_converter(network, paddle_op, inputs):
309309
return elu_layer.get_output(0)
310310

311311

312-
@converter_registry.register("pd_op.softplus", trt_version="8.x")
312+
@converter_registry.register("pd_op.softplus")
313313
def softplus_converter(network, paddle_op, inputs):
314314
x = inputs[0]
315315
beta = paddle_op.attrs()["beta"]
@@ -328,8 +328,8 @@ def softplus_converter(network, paddle_op, inputs):
328328
return softplus_layer.get_output(0)
329329

330330

331-
@converter_registry.register("pd_op.swish", trt_version="8.x")
332-
@converter_registry.register("pd_op.silu", trt_version="8.x")
331+
@converter_registry.register("pd_op.swish")
332+
@converter_registry.register("pd_op.silu")
333333
def swish_silu_converter(network, paddle_op, inputs):
334334
layer_output = network.add_activation(
335335
inputs[0], activation_type_map[paddle_op.name()]
@@ -343,7 +343,7 @@ def swish_silu_converter(network, paddle_op, inputs):
343343
)
344344

345345

346-
@converter_registry.register("pd_op.tanh_shrink", trt_version="8.x")
346+
@converter_registry.register("pd_op.tanh_shrink")
347347
def tanh_shrink_converter(network, paddle_op, inputs):
348348
x = inputs[0]
349349
tanh_layer = network.add_activation(x, trt.ActivationType.TANH)
@@ -355,7 +355,7 @@ def tanh_shrink_converter(network, paddle_op, inputs):
355355
return subtract_layer.get_output(0)
356356

357357

358-
@converter_registry.register("pd_op.stanh", trt_version="8.x")
358+
@converter_registry.register("pd_op.stanh")
359359
def stanh_converter(network, paddle_op, inputs):
360360
x = inputs[0]
361361
scale_a = paddle_op.attrs()["scale_a"]
@@ -367,7 +367,7 @@ def stanh_converter(network, paddle_op, inputs):
367367
return stanh_layer.get_output(0)
368368

369369

370-
@converter_registry.register("pd_op.mish", trt_version="8.x")
370+
@converter_registry.register("pd_op.mish")
371371
def mish_converter(network, paddle_op, inputs):
372372
x = inputs[0]
373373
softplus_layer = network.add_activation(x, trt.ActivationType.SOFTPLUS)
@@ -385,7 +385,7 @@ def mish_converter(network, paddle_op, inputs):
385385
)
386386

387387

388-
@converter_registry.register("pd_op.celu", trt_version="8.x")
388+
@converter_registry.register("pd_op.celu")
389389
def celu_converter(network, paddle_op, inputs):
390390
input_tensor = inputs[0]
391391
alpha = paddle_op.attrs()["alpha"]
@@ -451,7 +451,7 @@ def celu_converter(network, paddle_op, inputs):
451451
return output_tensor
452452

453453

454-
@converter_registry.register("pd_op.thresholded_relu", trt_version="8.x")
454+
@converter_registry.register("pd_op.thresholded_relu")
455455
def thresholded_relu_converter(network, paddle_op, inputs):
456456
x = inputs[0]
457457
threshold = paddle_op.attrs()["threshold"]
@@ -463,8 +463,8 @@ def thresholded_relu_converter(network, paddle_op, inputs):
463463
return thresholded_relu_layer.get_output(0)
464464

465465

466-
@converter_registry.register("pd_op.leaky_relu", trt_version="8.x")
467-
@converter_registry.register("pd_op.leaky_relu_", trt_version="8.x")
466+
@converter_registry.register("pd_op.leaky_relu")
467+
@converter_registry.register("pd_op.leaky_relu_")
468468
def leaky_relu_converter(network, paddle_op, inputs):
469469
x = inputs[0]
470470
negative_slope = paddle_op.attrs()["negative_slope"]
@@ -474,7 +474,7 @@ def leaky_relu_converter(network, paddle_op, inputs):
474474
return leaky_relu_layer.get_output(0)
475475

476476

477-
@converter_registry.register("pd_op.selu", trt_version="8.x")
477+
@converter_registry.register("pd_op.selu")
478478
def selu_converter(network, paddle_op, inputs):
479479
x = inputs[0]
480480
alpha = paddle_op.attrs()["alpha"]
@@ -486,7 +486,7 @@ def selu_converter(network, paddle_op, inputs):
486486
return selu_layer.get_output(0)
487487

488488

489-
@converter_registry.register("pd_op.prelu", trt_version="8.x")
489+
@converter_registry.register("pd_op.prelu")
490490
def prelu_converter(network, paddle_op, inputs):
491491
input, alpha_data = inputs
492492
input_dims = input.shape

python/paddle/tensorrt/impls/common.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@
3030
from paddle.tensorrt.util import get_trt_version_list
3131

3232

33-
@converter_registry.register("pd_op.dropout", trt_version="8.x")
33+
@converter_registry.register("pd_op.dropout")
3434
def dropout_converter(network, paddle_op, inputs):
3535
input_x = inputs[0]
3636
dropout_prob = get_input_constant_value(paddle_op, inputs, 2)[0]

python/paddle/tensorrt/impls/creation.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -65,17 +65,17 @@ def full_converter(network, paddle_op, inputs):
6565
return full_layer.get_output(0)
6666

6767

68-
@converter_registry.register("pd_op.assign", trt_version="8.x")
69-
@converter_registry.register("pd_op.assign_out_", trt_version="8.x")
68+
@converter_registry.register("pd_op.assign")
69+
@converter_registry.register("pd_op.assign_out_")
7070
def assign_converter(network, paddle_op, inputs):
7171
input_tensor = inputs[0]
7272
identity_layer = network.add_identity(input_tensor)
7373
set_layer_name(identity_layer, paddle_op)
7474
return identity_layer.get_output(0)
7575

7676

77-
@converter_registry.register("pd_op.assign_value", trt_version="8.x")
78-
@converter_registry.register("pd_op.assign_value_", trt_version="8.x")
77+
@converter_registry.register("pd_op.assign_value")
78+
@converter_registry.register("pd_op.assign_value_")
7979
def assign_value_converter(network, paddle_op, inputs):
8080
attrs = paddle_op.attrs()
8181
shape = attrs['shape']
@@ -108,7 +108,7 @@ def assign_value_converter(network, paddle_op, inputs):
108108
return const_layer.get_output(0)
109109

110110

111-
@converter_registry.register("pd_op.arange", trt_version="8.x")
111+
@converter_registry.register("pd_op.arange")
112112
def arange_converter(network, paddle_op, inputs):
113113
start, end, step = inputs
114114
zero_tensor = add_1D_constant_layer(
@@ -163,7 +163,7 @@ def arange_converter(network, paddle_op, inputs):
163163
return output_tensor
164164

165165

166-
@converter_registry.register("pd_op.full_like", trt_version="8.x")
166+
@converter_registry.register("pd_op.full_like")
167167
def full_like_converter(network, paddle_op, inputs):
168168
input_tensor = inputs[0]
169169
shape = input_tensor.shape
@@ -273,7 +273,7 @@ def full_like_converter(network, paddle_op, inputs):
273273
return output
274274

275275

276-
@converter_registry.register("pd_op.full_with_tensor", trt_version="8.x")
276+
@converter_registry.register("pd_op.full_with_tensor")
277277
def full_with_tensor_converter(network, paddle_op, inputs):
278278
value_input = inputs[0]
279279

@@ -373,7 +373,7 @@ def full_with_tensor_converter(network, paddle_op, inputs):
373373
return output_tensor
374374

375375

376-
@converter_registry.register("pd_op.meshgrid", trt_version="8.x")
376+
@converter_registry.register("pd_op.meshgrid")
377377
def meshgrid_converter(network, paddle_op, vec_inputs):
378378
inputs = vec_inputs[0]
379379
n = len(inputs)

python/paddle/tensorrt/impls/einsum.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
from paddle.tensorrt.register import converter_registry
1818

1919

20-
@converter_registry.register("pd_op.einsum", trt_version="8.x")
20+
@converter_registry.register("pd_op.einsum")
2121
def convert_einsum(network, paddle_op, inputs):
2222
equation = paddle_op.attrs().get("equation", "")
2323

python/paddle/tensorrt/impls/logic.py

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -35,23 +35,23 @@
3535
}
3636

3737

38-
@converter_registry.register("pd_op.greater_than", trt_version="8.x")
39-
@converter_registry.register("pd_op.less_than", trt_version="8.x")
40-
@converter_registry.register("pd_op.equal", trt_version="8.x")
41-
@converter_registry.register("pd_op.bitwise_and", trt_version="8.x")
42-
@converter_registry.register("pd_op.bitwise_or", trt_version="8.x")
43-
@converter_registry.register("pd_op.logical_xor", trt_version="8.x")
44-
@converter_registry.register("pd_op.logical_or", trt_version="8.x")
45-
@converter_registry.register("pd_op.logical_or_", trt_version="8.x")
46-
@converter_registry.register("pd_op.logical_and", trt_version="8.x")
38+
@converter_registry.register("pd_op.greater_than")
39+
@converter_registry.register("pd_op.less_than")
40+
@converter_registry.register("pd_op.equal")
41+
@converter_registry.register("pd_op.bitwise_and")
42+
@converter_registry.register("pd_op.bitwise_or")
43+
@converter_registry.register("pd_op.logical_xor")
44+
@converter_registry.register("pd_op.logical_or")
45+
@converter_registry.register("pd_op.logical_or_")
46+
@converter_registry.register("pd_op.logical_and")
4747
def logic_converter(network, paddle_op, inputs):
4848
layer_output = add_elementwise_layer(
4949
network, paddle_op, inputs, logic_type_map[paddle_op.name()]
5050
)
5151
return layer_output
5252

5353

54-
@converter_registry.register("pd_op.not_equal", trt_version="8.x")
54+
@converter_registry.register("pd_op.not_equal")
5555
def not_equal_converter(network, paddle_op, inputs):
5656
layer_output = add_elementwise_layer(
5757
network, paddle_op, inputs, trt.ElementWiseOperation.EQUAL
@@ -62,7 +62,7 @@ def not_equal_converter(network, paddle_op, inputs):
6262
return layer_output
6363

6464

65-
@converter_registry.register("pd_op.bitwise_not", trt_version="8.x")
65+
@converter_registry.register("pd_op.bitwise_not")
6666
def bitwise_not_converter(network, paddle_op, inputs):
6767
input_tensor = inputs[0]
6868
if input_tensor.dtype == trt.bool:
@@ -93,8 +93,8 @@ def bitwise_not_converter(network, paddle_op, inputs):
9393
return layer_output
9494

9595

96-
@converter_registry.register("pd_op.logical_not", trt_version="8.x")
97-
@converter_registry.register("pd_op.logical_not_", trt_version="8.x")
96+
@converter_registry.register("pd_op.logical_not")
97+
@converter_registry.register("pd_op.logical_not_")
9898
def logic_not_converter(network, paddle_op, inputs):
9999
layer_output = unary_op_converter(network, paddle_op, inputs)
100100
return layer_output

python/paddle/tensorrt/impls/manipulation.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ def reshape_converter(network, paddle_op, inputs):
8484
return layer.get_output(0)
8585

8686

87-
@converter_registry.register("pd_op.gather", trt_version="8.x")
87+
@converter_registry.register("pd_op.gather")
8888
def gather_converter(network, paddle_op, inputs):
8989
input_tensor = inputs[0]
9090
index_tensor = inputs[1]
@@ -101,7 +101,7 @@ def gather_converter(network, paddle_op, inputs):
101101
return gather_layer.get_output(0)
102102

103103

104-
@converter_registry.register("pd_op.gather_nd", trt_version="8.x")
104+
@converter_registry.register("pd_op.gather_nd")
105105
def gather_nd_converter(network, paddle_op, inputs):
106106
input_tensor, indices_tensor = inputs
107107
non_zero_layer = network.add_gather_v2(
@@ -405,8 +405,8 @@ def expand_as_converter(network, paddle_op, inputs):
405405
)
406406

407407

408-
@converter_registry.register("pd_op.cast", trt_version="8.x")
409-
@converter_registry.register("pd_op.cast_", trt_version="8.x")
408+
@converter_registry.register("pd_op.cast")
409+
@converter_registry.register("pd_op.cast_")
410410
def cast_converter(network, paddle_op, inputs):
411411
input_tensor = inputs[0]
412412
out_dtype = int(paddle_op.attrs().get("dtype"))
@@ -624,7 +624,7 @@ def slice_converter(network, paddle_op, inputs):
624624
return output_tensor
625625

626626

627-
@converter_registry.register("pd_op.split_with_num", trt_version="8.x")
627+
@converter_registry.register("pd_op.split_with_num")
628628
def split_with_num_converter(network, paddle_op, inputs):
629629
input_tensor = inputs[0]
630630
input_shape_size = len(input_tensor.shape)
@@ -756,7 +756,7 @@ def split_with_num_converter(network, paddle_op, inputs):
756756
return outputs
757757

758758

759-
@converter_registry.register("pd_op.split", trt_version="8.x")
759+
@converter_registry.register("pd_op.split")
760760
def split_converter(network, paddle_op, inputs):
761761
input_tensor = inputs[0]
762762
input_shape = input_tensor.shape
@@ -938,7 +938,7 @@ def split_converter(network, paddle_op, inputs):
938938
return outputs
939939

940940

941-
@converter_registry.register("pd_op.stack", trt_version="8.x")
941+
@converter_registry.register("pd_op.stack")
942942
def stack_converter(network, paddle_op, inputs):
943943
input_tensors = inputs[0]
944944
input_num = len(input_tensors)
@@ -1012,7 +1012,7 @@ def stack_converter(network, paddle_op, inputs):
10121012
return output_tensor
10131013

10141014

1015-
@converter_registry.register("pd_op.tile", trt_version="8.x")
1015+
@converter_registry.register("pd_op.tile")
10161016
def tile_converter(network, paddle_op, inputs):
10171017
input = inputs[0]
10181018
input_shape = input.shape
@@ -1120,7 +1120,7 @@ def take_along_axis_converter(network, paddle_op, inputs):
11201120
return output_tensor
11211121

11221122

1123-
@converter_registry.register("pd_op.strided_slice", trt_version="8.x")
1123+
@converter_registry.register("pd_op.strided_slice")
11241124
def strided_slice_converter(network, paddle_op, inputs):
11251125
input_tensor = inputs[0]
11261126
axes = paddle_op.attrs()["axes"]
@@ -1228,7 +1228,7 @@ def strided_slice_converter(network, paddle_op, inputs):
12281228
return layer.get_output(0)
12291229

12301230

1231-
@converter_registry.register("pd_op.roll", trt_version="8.x")
1231+
@converter_registry.register("pd_op.roll")
12321232
def roll_converter(network, paddle_op, inputs):
12331233
input_tensor = inputs[0]
12341234
axis = paddle_op.attrs()["axis"]
@@ -1373,7 +1373,7 @@ def roll_converter(network, paddle_op, inputs):
13731373
return layer.get_output(0)
13741374

13751375

1376-
@converter_registry.register("pd_op.pad", trt_version="8.x")
1376+
@converter_registry.register("pd_op.pad")
13771377
def pad_converter(network, paddle_op, inputs):
13781378
input_tensor = inputs[0]
13791379
paddings = paddle_op.attrs()["paddings"]
@@ -1385,7 +1385,7 @@ def pad_converter(network, paddle_op, inputs):
13851385
return layer.get_output(0)
13861386

13871387

1388-
@converter_registry.register("pd_op.pad3d", trt_version="8.x")
1388+
@converter_registry.register("pd_op.pad3d")
13891389
def pad3d_converter(network, paddle_op, inputs):
13901390
input_tensor, paddings = inputs
13911391
value = paddle_op.attrs().get("pad_value", 0.0)
@@ -1501,7 +1501,7 @@ def pad3d_converter(network, paddle_op, inputs):
15011501
return slice_layer.get_output(0)
15021502

15031503

1504-
@converter_registry.register("pd_op.numel", trt_version="8.x")
1504+
@converter_registry.register("pd_op.numel")
15051505
def numel_converter(network, paddle_op, inputs):
15061506
input_tensor = inputs[0]
15071507
shape_tensor = network.add_shape(input_tensor)
@@ -1514,7 +1514,7 @@ def numel_converter(network, paddle_op, inputs):
15141514
return layer.get_output(0)
15151515

15161516

1517-
@converter_registry.register("pd_op.index_put", trt_version="8.x")
1517+
@converter_registry.register("pd_op.index_put")
15181518
def index_put_converter(network, paddle_op, inputs):
15191519
input_tensor, indices_list, value_tensor = inputs
15201520
indices_tensor = indices_list[0]

0 commit comments

Comments (0)