1 parent a076272 commit 81f3213
examples/models/llama/export_llama_lib.py
@@ -699,19 +699,6 @@ def _validate_args(args):
                 "Shared embedding is only supported with torchao quantization."
             )
 
-    if (
-        args.quantization_mode is not None
-        and args.quantization_mode.startswith("torchao:")
-    ) or (
-        args.embedding_quantize is not None
-        and args.embedding_quantize.startswith("torchao:")
-    ):
-        if args.enable_dynamic_shape:
-            raise ValueError(
-                "Dynamic shape is not currently supported with torchao ops. Please use --disable_dynamic_shape."
-                "If you need this feature, please file an issue."
-            )
-
 
 def _to_edge_and_lower_llama_xnnpack(
     builder_exported,
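
After this deletion, _validate_args no longer rejects torchao-quantized exports when dynamic shape is enabled: the removed block was the only place a torchao:-prefixed --quantization_mode or --embedding_quantize forced --disable_dynamic_shape. Below is a minimal standalone sketch of the guard that was removed, for reference; the function name and the example args values are hypothetical stand-ins for export_llama's real parsed arguments:

from argparse import Namespace

def _torchao_dynamic_shape_guard(args: Namespace) -> None:
    # Reproduces the check deleted by this commit: dynamic shape was
    # rejected whenever either flag used a torchao: quantization scheme.
    uses_torchao = (
        args.quantization_mode is not None
        and args.quantization_mode.startswith("torchao:")
    ) or (
        args.embedding_quantize is not None
        and args.embedding_quantize.startswith("torchao:")
    )
    if uses_torchao and args.enable_dynamic_shape:
        raise ValueError(
            "Dynamic shape is not currently supported with torchao ops. "
            "Please use --disable_dynamic_shape."
        )

# Hypothetical flag values; real exports get these from the CLI parser.
args = Namespace(
    quantization_mode="torchao:8da4w",  # assumed example scheme string
    embedding_quantize=None,
    enable_dynamic_shape=True,
)
try:
    _torchao_dynamic_shape_guard(args)
except ValueError as e:
    print(f"Behavior before this commit: {e}")

With the check gone, that flag combination proceeds into export rather than failing validation up front.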