
Commit 6cf5620
Authored by alexbeloi, committed by Wei Wei

remove opt-out for fbcode/deeplearning/trt in lint engine (#68)

Summary: Pull Request resolved: https://github.com/pytorch/fx2trt/pull/68

fbcode/deeplearning/trt/ is currently opted out of the Black lint engine. Removing the opt-out (i.e., opting in) makes `arc lint` surface Black's suggestions for these files. This commit applies Black's suggested formatting to fbcode/deeplearning/trt/*.

Reviewed By: yuhc
Differential Revision: D36128102
fbshipit-source-id: a34014dbf4465ac9a0e86d281a0dcbde097584aa

Parent: 28813ab

File tree: 123 files changed, +3479 −1539 lines
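The bulk of the diff below is mechanical Black formatting. As a rough illustration (a toy snippet, not code from this repository), the recurring rewrites are: double-quoted strings, over-long signatures and calls wrapped inside parentheses, and a trailing comma after the last argument of a fully exploded call.

# Toy before/after, not from the repository; shows what Black does when a
# line exceeds its default 88-character limit.

def make_layer_unformatted(network, module, input_val, activation_type, dyn_range_fn, layer_name):
    return 'layer'

def make_layer_formatted(
    network, module, input_val, activation_type, dyn_range_fn, layer_name
):
    return "layer"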

fx/converter_registry.py
Lines changed: 1 addition & 1 deletion

@@ -12,7 +12,7 @@ def tensorrt_converter(
     key: Target,
     no_implicit_batch_dim: bool = False,
     no_explicit_batch_dim: bool = False,
-    enabled: bool = True
+    enabled: bool = True,
 ) -> Callable[[Any], Any]:
     def register_converter(converter):
         CONVERTERS[key] = converter
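For readers unfamiliar with the registry, the decorator above stores each converter in a module-level CONVERTERS mapping keyed by the FX target. A self-contained sketch of that pattern, reconstructed from the visible lines (the `return converter` statement, the dict type, and the example key are assumptions; the real module presumably also honors flags like `enabled`, which this diff does not show):

# Self-contained sketch of the registry pattern shown above.
from typing import Any, Callable, Dict

CONVERTERS: Dict[Any, Callable] = {}

def tensorrt_converter(
    key: Any,  # `Target` in the real module
    no_implicit_batch_dim: bool = False,
    no_explicit_batch_dim: bool = False,
    enabled: bool = True,
) -> Callable[[Any], Any]:
    def register_converter(converter):
        CONVERTERS[key] = converter
        return converter  # assumed, so the decorated function stays usable

    return register_converter

@tensorrt_converter("example.target")  # hypothetical key
def example_converter(network, target, args, kwargs, name):
    ...

assert CONVERTERS["example.target"] is example_converter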

fx/converters/acc_ops_converters.py
Lines changed: 517 additions & 199 deletions
(Large diff not rendered by default.)

fx/converters/activation.py
Lines changed: 31 additions & 10 deletions

@@ -7,9 +7,11 @@
 
 from .converter_utils import mark_as_int8_layer
 
-def common_activation(network, mod, input_val, activation_type, activation_dyn_range_fn, layer_name):
-    layer = network.add_activation(
-        input=input_val, type=activation_type)
+
+def common_activation(
+    network, mod, input_val, activation_type, activation_dyn_range_fn, layer_name
+):
+    layer = network.add_activation(input=input_val, type=activation_type)
     layer.name = layer_name
 
     if input_val.dynamic_range:
@@ -27,28 +29,47 @@ def relu(network, submod, args, kwargs, layer_name):
     input_val = kwargs["input"]
 
     if not isinstance(input_val, trt.tensorrt.ITensor):
-        raise RuntimeError(f"ReLU received input {input_val} that is not part "
-                           "of the TensorRT region!")
+        raise RuntimeError(
+            f"ReLU received input {input_val} that is not part "
+            "of the TensorRT region!"
+        )
 
     def activation_dyn_range_fn(dyn_range):
         return max(0, dyn_range[0]), max(0, dyn_range[1])
 
-    return common_activation(network, submod, input_val, trt.ActivationType.RELU, activation_dyn_range_fn, layer_name)
+    return common_activation(
+        network,
+        submod,
+        input_val,
+        trt.ActivationType.RELU,
+        activation_dyn_range_fn,
+        layer_name,
+    )
 
 
 @tensorrt_converter(torch.nn.modules.activation.Sigmoid)
 def sigmoid(network, submod, args, kwargs, layer_name):
     # args/kwargs should have already been normalized to kwargs
     assert len(args) == 0
-    input_val = kwargs['input']
+    input_val = kwargs["input"]
 
     if not isinstance(input_val, trt.tensorrt.ITensor):
-        raise RuntimeError(f'Sigmoid received input {input_val} that is not part '
-                           'of the TensorRT region!')
+        raise RuntimeError(
+            f"Sigmoid received input {input_val} that is not part "
+            "of the TensorRT region!"
+        )
 
     def activation_dyn_range_fn(dyn_range):
         def sigmoid_fn(x):
             return 1 / (1 + np.exp(-x))
+
        return sigmoid_fn(dyn_range[0]), sigmoid_fn(dyn_range[1])
 
-    return common_activation(network, submod, input_val, trt.ActivationType.SIGMOID, activation_dyn_range_fn, layer_name)
+    return common_activation(
+        network,
+        submod,
+        input_val,
+        trt.ActivationType.SIGMOID,
+        activation_dyn_range_fn,
+        layer_name,
+    )
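The activation_dyn_range_fn hooks above propagate an INT8 dynamic range through the activation by applying it to the two range endpoints, which is sound because ReLU and sigmoid are monotonic. A standalone numeric check (the input range (-6, 6) is a made-up example, not from the diff):

# Endpoint propagation as in the converters above.
import numpy as np

def sigmoid_fn(x):
    return 1 / (1 + np.exp(-x))

dyn_range = (-6.0, 6.0)  # example input dynamic range

relu_range = (max(0, dyn_range[0]), max(0, dyn_range[1]))
sigmoid_range = (sigmoid_fn(dyn_range[0]), sigmoid_fn(dyn_range[1]))

print(relu_range)     # (0, 6.0)
print(sigmoid_range)  # (~0.0025, ~0.9975)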

fx/converters/adaptive_avgpool.py
Lines changed: 11 additions & 4 deletions

@@ -8,21 +8,28 @@
     extend_mod_attr_to_tuple,
 )
 
+
 @tensorrt_converter(torch.nn.modules.pooling.AdaptiveAvgPool2d)
 def adaptive_avgpool2d(network, submod, args, kwargs, name):
     # args/kwargs should have already been normalized to kwargs
     assert len(args) == 0
     input_val = kwargs["input"]
 
     if not isinstance(input_val, trt.tensorrt.ITensor):
-        raise RuntimeError(f"AdaptiveAvgPool2d received input {input_val} that is not part "
-                           "of the TensorRT region!")
+        raise RuntimeError(
+            f"AdaptiveAvgPool2d received input {input_val} that is not part "
+            "of the TensorRT region!"
+        )
 
     output_size = extend_mod_attr_to_tuple(submod, "output_size", 2)
-    stride = (input_val.shape[-2] // output_size[-2], input_val.shape[-1] // output_size[-1])
+    stride = (
+        input_val.shape[-2] // output_size[-2],
+        input_val.shape[-1] // output_size[-1],
+    )
     kernel_size = stride
     layer = network.add_pooling(
-        input=input_val, type=trt.PoolingType.AVERAGE, window_size=kernel_size)
+        input=input_val, type=trt.PoolingType.AVERAGE, window_size=kernel_size
+    )
     layer.stride = stride
     layer.name = name

fx/converters/add.py
Lines changed: 12 additions & 4 deletions

@@ -7,6 +7,7 @@
 
 from .converter_utils import get_dyn_range, mark_as_int8_layer
 
+
 @tensorrt_converter(operator.add)
 @tensorrt_converter(torch.add)
 def add(network, target, args, kwargs, layer_name):
@@ -20,7 +21,9 @@ def add(network, target, args, kwargs, layer_name):
     assert kwargs["alpha"] == 1
 
     if not all(isinstance(arg, trt.tensorrt.ITensor) for arg in [lhs_val, rhs_val]):
-        raise RuntimeError("add() received an input that is not part of the TensorRT region!")
+        raise RuntimeError(
+            "add() received an input that is not part of the TensorRT region!"
+        )
 
     layer = network.add_elementwise(lhs_val, rhs_val, trt.ElementWiseOperation.SUM)
     layer.name = layer_name
@@ -33,7 +36,9 @@ def quantized_add(network, target, args, kwargs, layer_name):
     lhs_val, rhs_val = kwargs["qa"], kwargs["qb"]
 
     if not all(isinstance(i, trt.tensorrt.ITensor) for i in [lhs_val, rhs_val]):
-        raise RuntimeError('Quantized add received an input that is not part of the TensorRT region!')
+        raise RuntimeError(
+            "Quantized add received an input that is not part of the TensorRT region!"
+        )
 
     layer = network.add_elementwise(lhs_val, rhs_val, trt.ElementWiseOperation.SUM)
     layer.name = layer_name
@@ -48,15 +53,18 @@ def quantized_add_relu(network, submod, args, kwargs, layer_name):
     lhs_val, rhs_val = kwargs["qa"], kwargs["qb"]
 
     if not all(isinstance(i, trt.tensorrt.ITensor) for i in [lhs_val, rhs_val]):
-        raise RuntimeError('Quantized add_relu received an input that is not part of the TensorRT region!')
+        raise RuntimeError(
+            "Quantized add_relu received an input that is not part of the TensorRT region!"
+        )
 
     layer = network.add_elementwise(lhs_val, rhs_val, trt.ElementWiseOperation.SUM)
     layer.name = f"{layer_name}_add"
     dyn_range = get_dyn_range(kwargs["scale"], kwargs["zero_point"], torch.quint8)
     mark_as_int8_layer(layer, dyn_range)
 
     layer = network.add_activation(
-        input=layer.get_output(0), type=trt.ActivationType.RELU)
+        input=layer.get_output(0), type=trt.ActivationType.RELU
+    )
     layer.name = f"{layer_name}_relu"
     mark_as_int8_layer(layer, dyn_range)

fx/converters/batchnorm.py
Lines changed: 15 additions & 12 deletions

@@ -11,33 +11,34 @@
     get_dyn_range,
 )
 
+
 def common_batchnorm(network, mod, input_val, layer_name, is_quantized):
-    scale = to_numpy(mod.weight) / np.sqrt(
-        to_numpy(mod.running_var) + mod.eps
-    )
-    bias = (
-        to_numpy(mod.bias)
-        - to_numpy(mod.running_mean) * scale
-    )
+    scale = to_numpy(mod.weight) / np.sqrt(to_numpy(mod.running_var) + mod.eps)
+    bias = to_numpy(mod.bias) - to_numpy(mod.running_mean) * scale
     power = np.ones_like(scale)
 
     layer = network.add_scale(input_val, trt.ScaleMode.CHANNEL, bias, scale, power)
     layer.name = layer_name
 
     if is_quantized:
-        mark_as_int8_layer(layer, get_dyn_range(mod.scale, mod.zero_point, torch.quint8))
+        mark_as_int8_layer(
+            layer, get_dyn_range(mod.scale, mod.zero_point, torch.quint8)
+        )
 
     return layer.get_output(0)
 
+
 @tensorrt_converter(torch.nn.modules.batchnorm.BatchNorm2d)
 def batchnorm2d(network, submod, args, kwargs, layer_name):
     # args/kwargs should have already been normalized to kwargs
     assert len(args) == 0
     input_val = kwargs["input"]
 
     if not isinstance(input_val, trt.tensorrt.ITensor):
-        raise RuntimeError(f"BatchNorm2d received input {input_val} that is not part "
-                           "of the TensorRT region!")
+        raise RuntimeError(
+            f"BatchNorm2d received input {input_val} that is not part "
+            "of the TensorRT region!"
+        )
 
     return common_batchnorm(network, submod, input_val, layer_name, is_quantized=False)
 
@@ -47,7 +48,9 @@ def quantized_batchnorm2d(network, submod, args, kwargs, layer_name):
     input_val = args[0]
 
     if not isinstance(input_val, trt.tensorrt.ITensor):
-        raise RuntimeError(f'Quantized BatchNorm2d received input {input_val} that is not part '
-                           'of the TensorRT region!')
+        raise RuntimeError(
+            f"Quantized BatchNorm2d received input {input_val} that is not part "
+            "of the TensorRT region!"
+        )
 
     return common_batchnorm(network, submod, input_val, layer_name, is_quantized=True)
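common_batchnorm folds eval-mode batch norm into a single TensorRT scale layer: with power fixed at 1, the CHANNEL-mode scale layer computes x * scale + shift, and the constants above make that equal to weight * (x - running_mean) / sqrt(running_var + eps) + bias. A standalone numeric check of that algebra (all values made up for illustration):

# Verifies the batchnorm folding above with arbitrary constants.
import numpy as np

x = np.array([0.5, -1.2, 3.0])   # activations in one channel
weight, bias = 1.1, 0.3          # learned affine parameters
mean, var, eps = 0.2, 0.9, 1e-5  # running statistics

scale = weight / np.sqrt(var + eps)
shift = bias - mean * scale      # the diff names this `bias`

folded = x * scale + shift
reference = weight * (x - mean) / np.sqrt(var + eps) + bias
assert np.allclose(folded, reference)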
