Skip to content

Commit 81f3213

Browse files
committed
up
1 parent a076272 commit 81f3213

File tree

1 file changed

+0
-13
lines changed

1 file changed

+0
-13
lines changed

examples/models/llama/export_llama_lib.py

Lines changed: 0 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -699,19 +699,6 @@ def _validate_args(args):
699699
"Shared embedding is only supported with torchao quantization."
700700
)
701701

702-
if (
703-
args.quantization_mode is not None
704-
and args.quantization_mode.startswith("torchao:")
705-
) or (
706-
args.embedding_quantize is not None
707-
and args.embedding_quantize.startswith("torchao:")
708-
):
709-
if args.enable_dynamic_shape:
710-
raise ValueError(
711-
"Dynamic shape is not currently supported with torchao ops. Please use --disable_dynamic_shape."
712-
"If you need this feature, please file an issue."
713-
)
714-
715702

716703
def _to_edge_and_lower_llama_xnnpack(
717704
builder_exported,

0 commit comments

Comments (0)