1 parent e5c7c71 commit f1815a8
examples/models/llama/export_llama_lib.py
@@ -598,9 +598,13 @@ def _validate_args(args):
     if args.num_sharding > 0 and not args.qnn:
         raise ValueError("Model shard is only supported with qnn backend now.")

-    if args.quantization_mode.startswith(
-        "torchao:"
-    ) or args.embedding_quantize.startswith("torchao:"):
+    if (
+        args.quantization_mode is not None
+        and args.quantization_mode.startswith("torchao:")
+    ) or (
+        args.embedding_quantize is not None
+        and args.embedding_quantize.startswith("torchao:")
+    ):
         if args.enable_dynamic_shape:
             raise ValueError(
                 "Dynamic shape is not currently supported with torchao ops. Please use --disable_dynamic_shape."
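For context, here is a minimal sketch (not part of the patch) of the failure mode the new `is not None` guards prevent. It assumes, as the diff implies, that quantization_mode and embedding_quantize are optional CLI flags whose parsed values default to None:

    from argparse import Namespace

    # Hypothetical stand-in for the exporter's parsed args; only the
    # relevant fields are populated, both left at their None defaults.
    args = Namespace(quantization_mode=None, embedding_quantize=None)

    # Pre-patch check: calling .startswith() directly on a None value
    # raises AttributeError: 'NoneType' object has no attribute 'startswith'.
    try:
        args.quantization_mode.startswith("torchao:")
    except AttributeError as exc:
        print(f"old check fails: {exc}")

    # Patched check: the `is not None` test short-circuits, so
    # .startswith() is only reached when a value was actually supplied.
    uses_torchao = (
        args.quantization_mode is not None
        and args.quantization_mode.startswith("torchao:")
    ) or (
        args.embedding_quantize is not None
        and args.embedding_quantize.startswith("torchao:")
    )
    print(uses_torchao)  # False: neither flag was passed

A short-circuiting `and` is the idiomatic fix here; an alternative would be `(args.quantization_mode or "").startswith("torchao:")`, but the explicit None check keeps the precondition readable.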