
Commit ea835dc

remove ndim references to support older pytorch versions (#415)
1 parent 7563770 commit ea835dc

17 files changed, +37 -36 lines changed
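Every change below is the same one-line substitution: Tensor.ndim becomes len(tensor.shape). The ndim attribute is a relatively recent addition to torch.Tensor (around PyTorch 1.2, to the best of my knowledge; the commit itself only says "older pytorch versions"), so on earlier releases the attribute lookup raises AttributeError, while tensor.shape has long been available as a torch.Size, a tuple subclass whose len() is the dimension count. A minimal sketch of the equivalence:

import torch

t = torch.zeros(1, 3, 4, 5)

# shape is a torch.Size (a tuple subclass), so len() counts dimensions
# on every PyTorch version.
assert len(t.shape) == 4

# t.dim() is another long-standing equivalent; t.ndim is the newer alias
# that this commit removes for compatibility.
assert t.dim() == len(t.shape)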

CHANGELOG.md
Lines changed: 2 additions & 1 deletion

@@ -4,4 +4,5 @@
 
 ### Added
 
-- Added reduced precision documentation page
+- Replaced Tensor.ndim references with len(tensor.shape) to support older pytorch versions
+- Added reduced precision documentation page

torch2trt/converters/ReLU6.py
Lines changed: 2 additions & 2 deletions

@@ -8,7 +8,7 @@ def convert_ReLU6(ctx):
     output = ctx.method_return
 
     input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input, 6])
-    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], output.ndim - 1)
+    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape) - 1)
 
     layer = ctx.network.add_activation(
         input=input_a_trt, type=trt.ActivationType.RELU)
@@ -20,4 +20,4 @@ def convert_ReLU6(ctx):
 
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
 def test_relu6_basic():
-    return torch.nn.ReLU6()
+    return torch.nn.ReLU6()
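Note the recurring "- 1" passed as the broadcast rank: these converters appear to target TensorRT's implicit-batch mode, where the PyTorch batch dimension is not part of the TensorRT tensor, so a rank-4 torch output maps to a rank-3 TRT tensor. If a single version-agnostic spelling of the dimension count is wanted, a tiny helper would do (my own sketch, not part of torch2trt):

def ndims(tensor):
    # len(tensor.shape) works on every PyTorch version, unlike Tensor.ndim.
    return len(tensor.shape)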

torch2trt/converters/add.py
Lines changed: 2 additions & 2 deletions

@@ -11,7 +11,7 @@ def convert_add(ctx):
     input_b = ctx.method_args[1]
     output = ctx.method_return
     input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
-    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], output.ndim - 1)
+    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape) - 1)
     layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.SUM)
     output._trt = layer.get_output(0)
 
@@ -106,4 +106,4 @@ def forward(self, x):
 
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10, 10)])
 def test_add_constant_batch():
-    return AddConstantBatch()
+    return AddConstantBatch()

torch2trt/converters/cat.py
Lines changed: 2 additions & 2 deletions

@@ -9,7 +9,7 @@ def convert_cat(ctx):
 
     output = ctx.method_return
     trt_inputs = add_missing_trt_tensors(ctx.network, inputs)
-    trt_inputs = broadcast_trt_tensors(ctx.network, trt_inputs, output.ndim - 1)
+    trt_inputs = broadcast_trt_tensors(ctx.network, trt_inputs, len(output.shape) - 1)
 
     layer = ctx.network.add_concatenation(inputs=trt_inputs)
     layer.axis = dim - 1
@@ -25,4 +25,4 @@ def forward(self, *x):
 
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 4), (1, 3, 4), (1, 17, 4)])
 def test_Cat_basic():
-    return Cat(1)
+    return Cat(1)

torch2trt/converters/compare.py
Lines changed: 1 addition & 1 deletion

@@ -6,7 +6,7 @@ def convert_elementwise(ctx, op):
     input_b = ctx.method_args[1]
     output = ctx.method_return
     input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
-    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], output.ndim - 1)
+    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape) - 1)
     layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, op)
     output._trt = layer.get_output(0)
 
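The broadcast step exists because TensorRT's elementwise layer requires both inputs to have the same number of dimensions. broadcast_trt_tensors itself is not shown in this diff; a sketch of the shape arithmetic it presumably performs (my reconstruction, not the torch2trt source) is to left-pad shapes with 1s up to the requested rank, as in numpy broadcasting:

# Hypothetical sketch, not the torch2trt source: left-pad each shape with 1s
# so every tensor reaches rank ndim, mirroring numpy-style broadcasting.
def broadcast_shapes_sketch(shapes, ndim):
    return [(1,) * (ndim - len(s)) + tuple(s) for s in shapes]

# Broadcasting TRT shapes (3, 3) and (1,) to rank 3:
assert broadcast_shapes_sketch([(3, 3), (1,)], 3) == [(1, 3, 3), (1, 1, 1)]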

torch2trt/converters/div.py
Lines changed: 3 additions & 3 deletions

@@ -12,7 +12,7 @@ def convert_div(ctx):
     input_b = ctx.method_args[1]
     output = ctx.method_return
     input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
-    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], output.ndim - 1)
+    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape) - 1)
     layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.DIV)
     output._trt = layer.get_output(0)
 
@@ -24,7 +24,7 @@ def convert_rdiv(ctx):
     input_b = ctx.method_args[0]
     output = ctx.method_return
     input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
-    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], output.ndim - 1)
+    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape) - 1)
     layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.DIV)
     output._trt = layer.get_output(0)
 
@@ -120,4 +120,4 @@ def forward(self, x):
 
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10, 10)])
 def test_div_constant_batch():
-    return DivConstantBatch()
+    return DivConstantBatch()

torch2trt/converters/getitem.py
Lines changed: 2 additions & 2 deletions

@@ -31,7 +31,7 @@ def convert_tensor_getitem(ctx):
 
     # Step 1 - Replace ellipsis with expanded slices
 
-    num_ellipsis = input.ndim - num_slice_types(slices)
+    num_ellipsis = len(input.shape) - num_slice_types(slices)
 
     new_slices = []
     for s in slices:
@@ -152,4 +152,4 @@ def test_tensor_getitem_2d_append_2dim():
 
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
 def test_tensor_getitem_2d_weird_combo():
-    return LambdaModule(lambda x: x[:, 0:3:4, None, None, 1, ...])
+    return LambdaModule(lambda x: x[:, 0:3:4, None, None, 1, ...])
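num_ellipsis here is really a count of dimensions rather than of ellipses: the number of input dimensions not consumed by explicit slice items, i.e. how many full slices the single "..." must expand into (assuming num_slice_types counts slices and integer indexes but not None insertions). A quick check of the arithmetic with my own example values:

import torch

x = torch.zeros(1, 5, 4, 3)
# x[:, 0, ...] has two dimension-consuming items (the ':' and the 0), so with
# a 4-d input the '...' stands for len(x.shape) - 2 = 2 full slices:
assert torch.equal(x[:, 0, ...], x[:, 0, :, :])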

torch2trt/converters/instance_norm.py
Lines changed: 2 additions & 2 deletions

@@ -58,7 +58,7 @@ def convert_instance_norm(ctx):
 
     eps_np = np.array([eps], dtype=np.float32)
     keep_dims = True
-    reduce_axes = torch_dim_to_trt_axes(tuple(range(2, input.ndim)))
+    reduce_axes = torch_dim_to_trt_axes(tuple(range(2, len(input.shape))))
 
     # compute mean over spatial
     mean_trt = ctx.network.add_reduce(input._trt, trt.ReduceOperation.AVG, reduce_axes, keep_dims).get_output(0)
@@ -147,4 +147,4 @@ def test_instance_norm_2d_dynamic_affine():
 
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3, 3, 3)])
 def test_instance_norm_3d_dynamic_affine():
-    return torch.nn.InstanceNorm3d(10, affine=True, track_running_stats=False)
+    return torch.nn.InstanceNorm3d(10, affine=True, track_running_stats=False)
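range(2, len(input.shape)) selects the spatial axes (everything after batch and channel), and TensorRT's add_reduce takes those axes as a bitmask. torch_dim_to_trt_axes is not shown in this diff; a sketch of what it presumably computes, consistent with the implicit-batch "- 1" offset used elsewhere in this commit (my reconstruction, not the torch2trt source):

# Hypothetical reconstruction: map torch dims to a TensorRT reduce-axes
# bitmask, with torch dim 1 -> TRT axis bit 0 because the batch dim is implicit.
def torch_dim_to_trt_axes_sketch(dims):
    axes = 0
    for d in dims:
        axes |= 1 << (d - 1)
    return axes

# For a 4-d NCHW input, the spatial dims (2, 3) become bits 1 and 2:
assert torch_dim_to_trt_axes_sketch((2, 3)) == 0b110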

torch2trt/converters/max.py
Lines changed: 3 additions & 3 deletions

@@ -8,14 +8,14 @@ def __convert_max_elementwise(ctx):
     input_b = ctx.method_args[1]
     output = ctx.method_return
     input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
-    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], output.ndim - 1)
+    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape) - 1)
     layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.MAX)
     output._trt = layer.get_output(0)
 
 
 def __convert_max_reduce(ctx):
     input = ctx.method_args[0]
-    dim = get_arg(ctx, 'dim', pos=1, default=tuple(range(1, input.ndim)))
+    dim = get_arg(ctx, 'dim', pos=1, default=tuple(range(1, len(input.shape))))
     keepdim = get_arg(ctx, 'keepdim', pos=2, default=False)
     input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
     output_val = ctx.method_return[0]
@@ -59,4 +59,4 @@ def forward(self, x, y):
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3), (1,)]) # broadcast
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3), (1, 3, 3)]) # broadcast
 def test_max_elementwise():
-    return MaxElementwise()
+    return MaxElementwise()
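In __convert_max_reduce, when torch.max is called without an explicit dim, the converter defaults to reducing over every axis except axis 0, presumably because the implicit batch dimension cannot be reduced in this mode. With my own example shape:

import torch

x = torch.zeros(1, 3, 4, 5)
dims = tuple(range(1, len(x.shape)))
assert dims == (1, 2, 3)   # every axis except the batch axis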

torch2trt/converters/min.py
Lines changed: 3 additions & 3 deletions

@@ -8,14 +8,14 @@ def __convert_min_elementwise(ctx):
     input_b = ctx.method_args[1]
     output = ctx.method_return
     input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
-    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], output.ndim - 1)
+    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape) - 1)
     layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.MIN)
     output._trt = layer.get_output(0)
 
 
 def __convert_min_reduce(ctx):
     input = ctx.method_args[0]
-    dim = get_arg(ctx, 'dim', pos=1, default=tuple(range(1,input.ndim)))
+    dim = get_arg(ctx, 'dim', pos=1, default=tuple(range(1,len(input.shape))))
     keepdim = get_arg(ctx, 'keepdim', pos=2, default=False)
     input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
     output_val = ctx.method_return[0]
@@ -59,4 +59,4 @@ def forward(self, x, y):
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3), (1,)]) # broadcast
 @add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3), (1, 3, 3)]) # broadcast
 def test_min_elementwise():
-    return MinElementwise()
+    return MinElementwise()
