1
- from typing import List , Optional , Sequence
1
+ import logging
2
+ from typing import List , Optional , Sequence , cast
2
3
3
4
from torch .fx .node import Target
4
5
from torch_tensorrt .dynamo ._SourceIR import SourceIR
5
6
from torch_tensorrt .dynamo .conversion ._ConversionContext import ConversionContext
6
7
from torch_tensorrt .dynamo .conversion .converter_utils import (
8
+ get_positive_dim ,
7
9
get_trt_tensor ,
8
10
set_layer_name ,
9
11
)
10
12
from torch_tensorrt .dynamo .types import TRTTensor
11
13
14
+ logger = logging .getLogger (__name__ )
15
+
12
16
13
17
def unsqueeze(
    ctx: ConversionContext,
    target: Target,
    source_ir: Optional[SourceIR],
    name: str,
    input: TRTTensor,
    dim: int,
) -> TRTTensor:
    """Insert a size-1 dimension into ``input`` at position ``dim``.

    Dispatches to TensorRT's native ``IUnsqueezeLayer`` when the installed
    TensorRT supports it (>= 10.7.0) and otherwise falls back to the
    shuffle-based ``unsqueeze_old`` implementation.

    Args:
        ctx: Conversion context holding the TensorRT network being built.
        target: FX node target being converted (used for layer naming).
        source_ir: Source IR of the node, for layer naming/diagnostics.
        name: Base name for the created TensorRT layer(s).
        input: TensorRT tensor to unsqueeze.
        dim: Position at which to insert the new dimension.

    Returns:
        The output tensor of the unsqueeze (or fallback shuffle) layer.
    """
    from importlib.metadata import version

    def _release_tuple(version_str: str) -> tuple:
        """Parse the leading numeric release components of a version string
        (e.g. "10.7.0.post1" -> (10, 7, 0)) for numeric comparison."""
        components = []
        for part in version_str.split("."):
            digits = ""
            for ch in part:
                if not ch.isdigit():
                    break
                digits += ch
            # Stop at the first non-numeric component (e.g. "post1", "a1").
            if not digits:
                break
            components.append(int(digits))
        # Pad so short strings like "10.7" compare correctly against (10, 7, 0).
        while len(components) < 3:
            components.append(0)
        return tuple(components[:3])

    trt_version = version("tensorrt")
    # Compare release tuples numerically: a plain string comparison would
    # wrongly treat e.g. "10.10.0" as older than "10.7.0".
    if _release_tuple(trt_version) < (10, 7, 0):
        logger.warning(
            f"IUnsqueezeLayer is supported starting from TensorRT 10.7.0, using the old unsqueeze implementation in the current TensorRT version: {trt_version}"
        )
        return unsqueeze_old(ctx, target, source_ir, name, input, dim)
    axes = get_trt_tensor(ctx, dim, f"{name}_axes")
    layer = ctx.net.add_unsqueeze(input, axes)
    set_layer_name(layer, target, name, source_ir)
    return layer.get_output(0)
25
36
26
37
38
# Fallback for TensorRT builds prior to 10.7.0 (e.g. Jetson), where
# IUnsqueezeLayer is not available: emulates unsqueeze with one IShuffleLayer.
def unsqueeze_old(
    ctx: ConversionContext,
    target: Target,
    source_ir: Optional[SourceIR],
    name: str,
    input: TRTTensor,
    dim: int,
) -> TRTTensor:
    """Insert a size-1 dimension into ``input`` at position ``dim`` using a
    shuffle layer (a reshape, plus a transpose when needed).

    At most one dimension may be specified as -1 (dynamic) in a reshape, so
    when the input carries multiple dynamic extents the new axis is first
    inserted at an ``intermediate_dim`` where the reshape stays legal, then
    moved to ``dim`` via the shuffle layer's ``second_transpose``.

    Args:
        ctx: Conversion context holding the TensorRT network being built.
        target: FX node target being converted (used for layer naming).
        source_ir: Source IR of the node, for layer naming/diagnostics.
        name: Base name for the created TensorRT layer.
        input: Tensor to unsqueeze (converted to a TRTTensor if needed).
        dim: Position at which to insert the new dimension; may be negative.

    Returns:
        The output tensor of the shuffle layer.

    Raises:
        RuntimeError: If ``input`` cannot be materialized as a TRTTensor.
    """
    input_val = get_trt_tensor(ctx, input, f"{name}_input")
    if not isinstance(input_val, TRTTensor):
        raise RuntimeError(
            f"unsqueeze received input {input_val} that is not part "
            "of the TensorRT region!"
        )

    dim = cast(int, dim)

    input_shape_size = len(input_val.shape)
    # Normalize negative dims against the *output* rank (input rank + 1).
    dim = get_positive_dim(dim, input_shape_size + 1)

    intermediate_dim = 0
    dynamic_shape_cnt = 0
    # if unsqueeze the last dimensions, we can directly append to the shape
    if dim == input_shape_size:
        intermediate_dim = dim
    else:
        # since maximum of one dimension is permitted to be specified as -1
        # find the intermediate_dim which has only 1 dynamic_shape_cnt
        # and then we can add a transpose after reshape if it is not the final shape we want
        #
        # Scan from the last axis down towards ``dim``: on encountering a
        # second dynamic (-1) extent, insert just above it; if ``dim`` is
        # reached first, the reshape can place the new axis at ``dim`` directly.
        for i, s in reversed(list(enumerate(input_val.shape))):
            if i >= dim:
                if s == -1:
                    dynamic_shape_cnt += 1
                    if dynamic_shape_cnt > 1:
                        intermediate_dim = i + 1
                        break
                if i == dim:
                    intermediate_dim = i
                    break
    # calculate the new_shape for the shuffle layer's reshape_dims
    new_shape = list(
        tuple(input_val.shape)[:intermediate_dim]
        + (1,)
        + tuple(input_val.shape)[intermediate_dim:]
    )
    # Replace dynamic (-1) extents before the insertion point with 0 —
    # presumably the reshape placeholder meaning "copy this extent from the
    # input", leaving at most one -1 in reshape_dims. NOTE(review): confirm
    # against the IShuffleLayer reshape_dims documentation.
    for i, s in enumerate(new_shape):
        if i < intermediate_dim and s == -1:
            new_shape[i] = 0
    layer = ctx.net.add_shuffle(input_val)
    layer.reshape_dims = tuple(new_shape)
    # if the intermediate_dim is not the final dim we want to unsqueeze, add a second_transpose after reshape
    if intermediate_dim != dim:
        # calculate the second_transpose for the shuffle layer
        permutation = [*range(0, len(new_shape))]
        # for example: if the reshape_dims is (3, 3, 5, 1, 5) and the final shape we want is (3, 1, 3, 5, 5)
        # here intermediate_dim=3, dim=1, we need to move intermediate_dim before [dim: intermediate_dim)
        new_permutation = (
            tuple(permutation[:dim])
            + (intermediate_dim,)
            + tuple(permutation[dim:intermediate_dim])
            + tuple(permutation[intermediate_dim + 1 :])
        )
        layer.second_transpose = new_permutation
    set_layer_name(layer, target, name, source_ir)
    return layer.get_output(0)
104
+
105
+
27
106
def broadcast_in_dim (
28
107
ctx : ConversionContext ,
29
108
target : Target ,
0 commit comments