Commit 22451ce

Authored by Wei
[FX] remove op_lowering_disallow_list and format revert (#1261)
* sync to fb master
* reverse _compile.py change
* comment line length to use default
* update nightly pytorch to 0810
* black formatting
* update
1 parent 6f61c6f commit 22451ce
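
Most of the diff below is mechanical Black output: with the custom line-length setting commented out, Black falls back to its default limit of 88 characters and wraps long single-line calls. A rough, hand-written illustration of that rewrite pattern (the tensors here are invented for the example, not taken from the diff):

import torch

reference = torch.randn(4, 8)
candidate = reference.clone()

# Before: one call longer than Black's default 88-character limit
score = torch.nn.functional.cosine_similarity(reference.flatten(), candidate.flatten(), dim=0)

# After: Black keeps the call intact but wraps its arguments onto their own lines
score = torch.nn.functional.cosine_similarity(
    reference.flatten(), candidate.flatten(), dim=0
)
print(score)  # ~1.0, since the tensors are identical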

149 files changed: +2710 additions, -868 deletions


.circleci/config.yml

Lines changed: 1 addition & 1 deletion
@@ -751,7 +751,7 @@ parameters:
   # Nightly platform config
   torch-nightly-build:
     type: string
-    default: "1.13.0.dev20220731+cu113"
+    default: "1.13.0.dev20220810+cu113"
   torch-nightly-build-index:
     type: string
     default: "https://download.pytorch.org/whl/nightly/cu113"

docsrc/conf.py

Lines changed: 12 additions & 2 deletions
@@ -99,7 +99,9 @@
 }
 
 html_show_sourcelink = True
-html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "localtoc.html", "searchbox.html"]}
+html_sidebars = {
+    "**": ["logo-text.html", "globaltoc.html", "localtoc.html", "searchbox.html"]
+}
 
 # extensions.append("sphinx_material")
 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
@@ -183,7 +185,15 @@ def handle_item(fieldarg, content):
             typename = typename.replace("long", "python:long")
             typename = typename.replace("float", "python:float")
             typename = typename.replace("type", "python:type")
-            par.extend(self.make_xrefs(self.typerolename, domain, typename, addnodes.literal_emphasis, **kw))
+            par.extend(
+                self.make_xrefs(
+                    self.typerolename,
+                    domain,
+                    typename,
+                    addnodes.literal_emphasis,
+                    **kw
+                )
+            )
         else:
             par += fieldtype
         par += nodes.Text(")")

examples/custom_converters/elu_model.py

Lines changed: 3 additions & 1 deletion
@@ -2,7 +2,9 @@
 import torch_tensorrt
 
 # After "python3 setup install", you should find this .so file under generated "build" directory
-torch.ops.load_library("./elu_converter/build/lib.linux-x86_64-3.6/elu_converter.cpython-36m-x86_64-linux-gnu.so")
+torch.ops.load_library(
+    "./elu_converter/build/lib.linux-x86_64-3.6/elu_converter.cpython-36m-x86_64-linux-gnu.so"
+)
 
 
 class Elu(torch.nn.Module):

examples/fx/fx2trt_example.py

Lines changed: 3 additions & 1 deletion
@@ -141,4 +141,6 @@ def get_input(self, inputs):
 
 # Make sure the results match
 regular_model_output = model(*inputs)
-torch.testing.assert_close(reload_model_output, regular_model_output, atol=3e-3, rtol=1e-2)
+torch.testing.assert_close(
+    reload_model_output, regular_model_output, atol=3e-3, rtol=1e-2
+)
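
For context, torch.testing.assert_close treats a pair of elements as matching when |actual - expected| <= atol + rtol * |expected|. A minimal standalone check with the same tolerances as above (the tensors are made up for illustration):

import torch

expected = torch.tensor([1.000, 2.000])
actual = torch.tensor([1.002, 2.015])

# Both offsets stay within atol + rtol * |expected|:
#   |1.002 - 1.000| = 0.002 <= 3e-3 + 1e-2 * 1.0
#   |2.015 - 2.000| = 0.015 <= 3e-3 + 1e-2 * 2.0
torch.testing.assert_close(actual, expected, atol=3e-3, rtol=1e-2)
print("outputs match within tolerance")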

examples/fx/hugging_face_torchdynamo_example.py

Lines changed: 9 additions & 3 deletions
@@ -353,14 +353,18 @@ def run_all_eval(args, optimize_ctx, optimize_name, dtype):
     eval_inputs = (input_ids,)
 
     # Correctness check
-    is_accurate = check_correctness(args, model, eval_inputs, optimize_ctx, optimize_name)
+    is_accurate = check_correctness(
+        args, model, eval_inputs, optimize_ctx, optimize_name
+    )
     # Profile eager
     t, m = bench_model_eval(args, "eager", model, eval_inputs, NullContext())
     results.append(create_record(model_name, dtype, is_accurate, "eager", t, m))
 
     # Profile Dynamo nvfuser
     t, m = bench_model_eval(args, optimize_name, model, eval_inputs, optimize_ctx)
-    results.append(create_record(model_name, dtype, is_accurate, optimize_name, t, m))
+    results.append(
+        create_record(model_name, dtype, is_accurate, optimize_name, t, m)
+    )
 
     # calculate relative improvements
     base_r = results[-2]
@@ -412,7 +416,9 @@ def main():
     if optimize_name == "dynamo_fx2trt_fp32":
         experiment = partial(experiment, dtype=torch.float32)
 
-    experiment = partial(experiment, optimize_ctx=optimize_ctx, optimize_name=optimize_name)
+    experiment = partial(
+        experiment, optimize_ctx=optimize_ctx, optimize_name=optimize_name
+    )
     experiment(args)

examples/fx/lower_example.py

Lines changed: 4 additions & 1 deletion
@@ -125,7 +125,10 @@ def benchmark(
         ),
     ]
 
-    results = [run_configuration_benchmark(deepcopy(model), inputs, conf_) for conf_ in configurations]
+    results = [
+        run_configuration_benchmark(deepcopy(model), inputs, conf_)
+        for conf_ in configurations
+    ]
 
     for res in results:
         print(res.format())

examples/fx/quantized_resnet_test.py

Lines changed: 3 additions & 1 deletion
@@ -108,7 +108,9 @@ def build_int8_trt_implicit_quant(rn18):
         InputTensorSpec.from_tensors([data]),
         logger_level=trt.Logger.VERBOSE,
     )
-    interpreter_result = interp.run(lower_precision=LowerPrecision.INT8, strict_type_constraints=True)
+    interpreter_result = interp.run(
+        lower_precision=LowerPrecision.INT8, strict_type_constraints=True
+    )
     trt_mod = TRTModule(
         interpreter_result.engine,
         interpreter_result.input_names,

examples/fx/torch_trt_simple_example.py

Lines changed: 36 additions & 8 deletions
@@ -12,35 +12,63 @@ def test_torch_tensorrt(model, inputs):
     # fp32 test
     with torch.inference_mode():
         ref_fp32 = model_ts(*inputs_ts)
-        trt_ts_module = torch_tensorrt.compile(model_ts, inputs=inputs_ts, enabled_precisions={torch.float32})
+        trt_ts_module = torch_tensorrt.compile(
+            model_ts, inputs=inputs_ts, enabled_precisions={torch.float32}
+        )
         result_fp32 = trt_ts_module(*inputs_ts)
-        assert torch.nn.functional.cosine_similarity(ref_fp32.flatten(), result_fp32.flatten(), dim=0) > 0.9999
+        assert (
+            torch.nn.functional.cosine_similarity(
+                ref_fp32.flatten(), result_fp32.flatten(), dim=0
+            )
+            > 0.9999
+        )
     # fp16 test
     model_ts = model_ts.half()
     inputs_ts = [i.cuda().half() for i in inputs_ts]
     with torch.inference_mode():
         ref_fp16 = model_ts(*inputs_ts)
-        trt_ts_module = torch_tensorrt.compile(model_ts, inputs=inputs_ts, enabled_precisions={torch.float16})
+        trt_ts_module = torch_tensorrt.compile(
+            model_ts, inputs=inputs_ts, enabled_precisions={torch.float16}
+        )
         result_fp16 = trt_ts_module(*inputs_ts)
-        assert torch.nn.functional.cosine_similarity(ref_fp16.flatten(), result_fp16.flatten(), dim=0) > 0.99
+        assert (
+            torch.nn.functional.cosine_similarity(
+                ref_fp16.flatten(), result_fp16.flatten(), dim=0
+            )
+            > 0.99
+        )
 
     # FX path
     model_fx = copy.deepcopy(model)
     inputs_fx = copy.deepcopy(inputs)
     # fp32 test
     with torch.inference_mode():
         ref_fp32 = model_fx(*inputs_fx)
-        trt_fx_module = torch_tensorrt.compile(model_fx, ir="fx", inputs=inputs_fx, enabled_precisions={torch.float32})
+        trt_fx_module = torch_tensorrt.compile(
+            model_fx, ir="fx", inputs=inputs_fx, enabled_precisions={torch.float32}
+        )
         result_fp32 = trt_fx_module(*inputs_fx)
-        assert torch.nn.functional.cosine_similarity(ref_fp32.flatten(), result_fp32.flatten(), dim=0) > 0.9999
+        assert (
+            torch.nn.functional.cosine_similarity(
+                ref_fp32.flatten(), result_fp32.flatten(), dim=0
+            )
+            > 0.9999
+        )
     # fp16 test
     model_fx = model_fx.cuda().half()
     inputs_fx = [i.cuda().half() for i in inputs_fx]
     with torch.inference_mode():
         ref_fp16 = model_fx(*inputs_fx)
-        trt_fx_module = torch_tensorrt.compile(model_fx, ir="fx", inputs=inputs_fx, enabled_precisions={torch.float16})
+        trt_fx_module = torch_tensorrt.compile(
+            model_fx, ir="fx", inputs=inputs_fx, enabled_precisions={torch.float16}
+        )
         result_fp16 = trt_fx_module(*inputs_fx)
-        assert torch.nn.functional.cosine_similarity(ref_fp16.flatten(), result_fp16.flatten(), dim=0) > 0.99
+        assert (
+            torch.nn.functional.cosine_similarity(
+                ref_fp16.flatten(), result_fp16.flatten(), dim=0
+            )
+            > 0.99
+        )
 
 
 if __name__ == "__main__":
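
The pattern exercised above is the same for both front ends: compile the module with torch_tensorrt.compile (TorchScript by default, FX when ir="fx") and accept the result if the cosine similarity of the flattened outputs stays near 1. A condensed, self-contained sketch of that flow, assuming a CUDA GPU and a working torch_tensorrt install (the toy model, input shape, and helper name are invented for illustration):

import copy

import torch
import torch_tensorrt


def cosine_check(ref: torch.Tensor, out: torch.Tensor, threshold: float) -> bool:
    # Flatten both outputs and require the vectors to be nearly parallel.
    sim = torch.nn.functional.cosine_similarity(ref.flatten(), out.flatten(), dim=0)
    return bool(sim > threshold)


if __name__ == "__main__":
    model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).cuda().eval()
    inputs = [torch.randn(1, 3, 32, 32, device="cuda")]

    with torch.inference_mode():
        ref = model(*inputs)

    # FX front end; dropping ir="fx" would take the TorchScript path instead.
    trt_mod = torch_tensorrt.compile(
        copy.deepcopy(model), ir="fx", inputs=inputs, enabled_precisions={torch.float32}
    )
    assert cosine_check(ref, trt_mod(*inputs), 0.9999)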

examples/fx/torchdynamo_example.py

Lines changed: 4 additions & 1 deletion
@@ -142,7 +142,10 @@ def benchmark(
         ),
     ]
 
-    results = [run_configuration_benchmark(deepcopy(model), inputs, conf_) for conf_ in configurations]
+    results = [
+        run_configuration_benchmark(deepcopy(model), inputs, conf_)
+        for conf_ in configurations
+    ]
 
     for res in results:
         print(res.format())

examples/int8/training/vgg16/export_ckpt.py

Lines changed: 3 additions & 1 deletion
@@ -75,7 +75,9 @@ def test(model, dataloader, crit):
     ),
 )
 
-testing_dataloader = torch.utils.data.DataLoader(testing_dataset, batch_size=32, shuffle=False, num_workers=2)
+testing_dataloader = torch.utils.data.DataLoader(
+    testing_dataset, batch_size=32, shuffle=False, num_workers=2
+)
 
 crit = torch.nn.CrossEntropyLoss()
