1 file changed: +8 −2 lines
@@ -36,5 +36,7 @@
 from executorch.extension.llm.export.export_passes import RemoveRedundantTransposes
 from pytorch_tokenizers import get_tokenizer
+
+# TODO: remove these once pt2e migration from torch.ao to torchao is complete
 from torch.ao.quantization.quantizer import Quantizer as TorchQuantizer
 from torch.ao.quantization.quantizer.composable_quantizer import (
     ComposableQuantizer as TorchComposableQuantizer,
@@ -374,10 +376,14 @@ def pt2e_quantize(
         if self.verbose:
             logging.info(f"Applied quantizers: {quantizers}")
 
-        if any(isinstance(q, Quantizer) for q in quantizers):
+        if all(isinstance(q, Quantizer) for q in quantizers):
             composed_quantizer = ComposableQuantizer(quantizers)
-        else:
+        elif all(isinstance(q, TorchQuantizer) for q in quantizers):
             composed_quantizer = TorchComposableQuantizer(quantizers)
+        else:
+            raise ValueError(
+                "Quantizers must be either Quantizer or TorchQuantizer"
+            )
 
         assert (
             self.pre_autograd_graph_module is not None
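Why the change matters: with the old `any(...)` check, a mixed list containing both torchao and torch.ao quantizers was handed to torchao's ComposableQuantizer, which only knows how to drive torchao quantizers. The `all(...)` checks guarantee each composer receives a homogeneous list, and a mixed list now fails fast with a ValueError. Below is a minimal standalone sketch of the new dispatch, using stub classes in place of the real quantizer types; everything in it is illustrative, not the actual ExecuTorch/torchao API.

# Stubs standing in for torchao's Quantizer and torch.ao's Quantizer
# (hypothetical; the real classes live in torchao and
# torch.ao.quantization.quantizer respectively).
class Quantizer: ...
class TorchQuantizer: ...

def compose(quantizers):
    # New behavior: each branch requires a homogeneous list; a mix of the
    # two quantizer families raises instead of being silently routed to
    # the torchao composer, as the old `any(...)` check would have done.
    if all(isinstance(q, Quantizer) for q in quantizers):
        return "torchao ComposableQuantizer", quantizers
    elif all(isinstance(q, TorchQuantizer) for q in quantizers):
        return "torch.ao ComposableQuantizer", quantizers
    raise ValueError("Quantizers must be either Quantizer or TorchQuantizer")

print(compose([Quantizer(), Quantizer()])[0])    # -> torchao ComposableQuantizer
print(compose([TorchQuantizer()])[0])            # -> torch.ao ComposableQuantizer
# compose([Quantizer(), TorchQuantizer()])       # -> ValueError (mixed list)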