1 parent 9db7b18 commit 96dc88e
examples/models/llama/export_llama_lib.py
```diff
@@ -757,7 +757,7 @@ def _prepare_for_llama_export(llm_config: LlmConfig) -> LLMEdgeManager:
         preq_embedding_quantize=llm_config.base.preq_embedding_quantize,
         local_global_attention=llm_config.model.local_global_attention,
         use_torchao_kernels_linear=llm_config.backend.torchao.use_torchao_kernels_linear,
-        use_torchao_kernels_tied_embedding=llm_config.backend.torchao.convert_tied_embedding,
+        use_torchao_kernels_tied_embedding=llm_config.backend.torchao.use_torchao_kernels_tied_embedding,
     )

```
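The one-line fix points the `use_torchao_kernels_tied_embedding` keyword argument at the correspondingly named field on the TorchAO backend config instead of the old `convert_tied_embedding` attribute. Below is a minimal, self-contained sketch of the assumed config shape; the dataclass names, fields, and defaults are illustrative stand-ins, not the repository's actual `LlmConfig` definitions.

```python
from dataclasses import dataclass, field


# Illustrative stand-ins for the nested config objects; the real classes
# live in the ExecuTorch repository and may differ in detail.
@dataclass
class TorchAoConfig:
    use_torchao_kernels_linear: bool = False
    # Renamed field: the kwarg must read this name rather than the old
    # convert_tied_embedding attribute.
    use_torchao_kernels_tied_embedding: bool = False


@dataclass
class BackendConfig:
    torchao: TorchAoConfig = field(default_factory=TorchAoConfig)


@dataclass
class LlmConfig:
    backend: BackendConfig = field(default_factory=BackendConfig)


llm_config = LlmConfig()

# After the fix, the keyword argument mirrors the config attribute name,
# so the value actually set on the config is the one that gets passed.
kwargs = dict(
    use_torchao_kernels_tied_embedding=(
        llm_config.backend.torchao.use_torchao_kernels_tied_embedding
    ),
)
print(kwargs)
```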