Commit 20a0e22

Revert "[Dynamo] Allow inlining into AO quantization modules (pytorch#152934)" (pytorch#158677)
This reverts commit 20e2ca3.
1 parent 9167ac8 commit 20a0e22

File tree

2 files changed: +0 −16 lines


test/dynamo/test_repros.py

Lines changed: 0 additions & 15 deletions
@@ -6450,21 +6450,6 @@ def inject_parameters(module, cls):
         with torch.no_grad():
             model(x)
 
-    def test_ao_fake_quantize_tracing(self):
-        import torch.ao.quantization.fake_quantize
-
-        q = torch.ao.quantization.FusedMovingAvgObsFakeQuantize()
-
-        def fn(x):
-            return q(x)
-
-        x = torch.ones(2, 2)
-        opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
-        res = opt_fn(x)
-        eager_res = fn(x)
-
-        self.assertEqual(res, eager_res)
-
     def test_typed_dict(self):
         class LlavaImagePixelInputs(TypedDict):
             type: Literal["pixel_values"]

torch/_dynamo/trace_rules.py

Lines changed: 0 additions & 1 deletion
@@ -3393,7 +3393,6 @@ def _module_dir(m: types.ModuleType):
     "torch._tensor",
     "torch.amp.autocast_mode",
     "torch.ao.nn",
-    "torch.ao.quantization.fake_quantize",
     "torch.autograd.function",
     "torch.backends.cuda",
     "torch.cuda.amp.autocast_mode",

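For context on what this revert restores: dropping "torch.ao.quantization.fake_quantize" from the inline list means Dynamo goes back to skipping that module rather than tracing into it, which is why the fullgraph tracing test above was deleted. Below is a minimal sketch of that expectation; it is an illustration inferred from the deleted test, not code from this commit, and it assumes a PyTorch build that includes the revert.

# Sketch (assumption: build includes this revert). With the inline-list
# entry removed, Dynamo is expected to skip the fake-quantize module, so
# compiling the deleted test's function with fullgraph=True should raise
# a graph-break error instead of succeeding.
import torch

q = torch.ao.quantization.FusedMovingAvgObsFakeQuantize()

def fn(x):
    return q(x)

x = torch.ones(2, 2)
try:
    torch.compile(fn, backend="eager", fullgraph=True)(x)
except Exception as e:
    # Expected on a build with this revert: a Dynamo "Unsupported" error.
    print(type(e).__name__)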