1 parent 70abfb4 commit d8ef6f8
examples/llm_ptq/example_utils.py
@@ -204,9 +204,7 @@ def get_model(
         if auto_model_module != AutoModelForCausalLM:
             model_kwargs2.pop("trust_remote_code", None)
         model_kwargs2["torch_dtype"] = torch_dtype
-        # DeciLMForCausalLM does not support max_memory argument
-        if "architectures" in hf_config and "DeciLMForCausalLM" in hf_config.architectures:
-            model_kwargs2.pop("max_memory", None)
+        model_kwargs2.pop("max_memory", None)
         model = from_config(hf_config, **model_kwargs2)

     max_memory = get_max_memory()
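For context, the dropped guard existed because DeciLMForCausalLM rejects the `max_memory` kwarg; after this commit the kwarg is stripped for every non-AutoModelForCausalLM loader. A minimal sketch of the resulting pattern, assuming a transformers-style config loader (the helper name `load_via_from_config` is hypothetical, not part of this repo):

```python
import torch
from transformers import AutoConfig, AutoModel

def load_via_from_config(model_path: str, **model_kwargs):
    # Hypothetical helper mirroring the patched branch of get_model().
    hf_config = AutoConfig.from_pretrained(model_path)
    # `max_memory` is an accelerate/device-map hint understood by
    # from_pretrained()-style loaders; from_config() constructors do not
    # accept it, so it is popped unconditionally here rather than only
    # when the architecture is DeciLMForCausalLM.
    model_kwargs.pop("max_memory", None)
    return AutoModel.from_config(hf_config, **model_kwargs)

# Example: builds a randomly initialized model from the config alone.
model = load_via_from_config("gpt2", torch_dtype=torch.float16)
```

Popping unconditionally is safe because `from_config` never uses `max_memory`, so the narrower architecture check bought nothing.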