Commit f1815a8

up
1 parent e5c7c71 commit f1815a8

1 file changed: +7 -3 lines changed


examples/models/llama/export_llama_lib.py

Lines changed: 7 additions & 3 deletions
@@ -598,9 +598,13 @@ def _validate_args(args):
     if args.num_sharding > 0 and not args.qnn:
         raise ValueError("Model shard is only supported with qnn backend now.")
 
-    if args.quantization_mode.startswith(
-        "torchao:"
-    ) or args.embedding_quantize.startswith("torchao:"):
+    if (
+        args.quantization_mode is not None
+        and args.quantization_mode.startswith("torchao:")
+    ) or (
+        args.embedding_quantize is not None
+        and args.embedding_quantize.startswith("torchao:")
+    ):
         if args.enable_dynamic_shape:
             raise ValueError(
                 "Dynamic shape is not currently supported with torchao ops. Please use --disable_dynamic_shape."