We read every piece of feedback and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 0178562 commit d2fe46f — Copy full SHA for d2fe46f
examples/llm_ptq/example_utils.py
@@ -206,9 +206,7 @@ def get_model(
206
if auto_model_module != AutoModelForCausalLM:
207
model_kwargs2.pop("trust_remote_code", None)
208
model_kwargs2["torch_dtype"] = torch_dtype
209
- # DeciLMForCausalLM does not support max_memory argument
210
- if "architectures" in hf_config and "DeciLMForCausalLM" in hf_config.architectures:
211
- model_kwargs2.pop("max_memory", None)
+ model_kwargs2.pop("max_memory", None)
212
model = from_config(hf_config, **model_kwargs2)
213
214
max_memory = get_max_memory()
0 commit comments