Commit d2fe46f

[Bug fix] remove deom init in hf_ptq
Signed-off-by: realAsma <[email protected]>
1 parent 0178562 commit d2fe46f

File tree

1 file changed: +1, -3 lines changed

examples/llm_ptq/example_utils.py

Lines changed: 1 addition & 3 deletions
@@ -206,9 +206,7 @@ def get_model(
    if auto_model_module != AutoModelForCausalLM:
        model_kwargs2.pop("trust_remote_code", None)
    model_kwargs2["torch_dtype"] = torch_dtype
-   # DeciLMForCausalLM does not support max_memory argument
-   if "architectures" in hf_config and "DeciLMForCausalLM" in hf_config.architectures:
-       model_kwargs2.pop("max_memory", None)
+   model_kwargs2.pop("max_memory", None)
    model = from_config(hf_config, **model_kwargs2)

    max_memory = get_max_memory()
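
For quick context, below is a minimal, hedged sketch of the kwarg handling after this commit. It is not the real get_model() from examples/llm_ptq/example_utils.py: sanitize_model_kwargs is a hypothetical helper invented for illustration, the surrounding structure is approximated from the diff context above, and only the three kwarg operations mirror lines visible in the diff.

```python
# Hedged, minimal sketch (not the actual example_utils.py code).
# `sanitize_model_kwargs` is a hypothetical helper; only the kwarg
# handling mirrors the lines shown in the diff above.
import torch
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM


def sanitize_model_kwargs(auto_model_module, model_kwargs2, torch_dtype):
    if auto_model_module != AutoModelForCausalLM:
        # trust_remote_code is an Auto*-loader argument; drop it on this path.
        model_kwargs2.pop("trust_remote_code", None)
    model_kwargs2["torch_dtype"] = torch_dtype
    # The fix: always drop max_memory here, instead of only when
    # hf_config.architectures contains "DeciLMForCausalLM".
    model_kwargs2.pop("max_memory", None)
    return model_kwargs2


# Example: kwargs prepared for a non-AutoModelForCausalLM loader.
print(
    sanitize_model_kwargs(
        AutoModelForSeq2SeqLM,
        {"trust_remote_code": True, "max_memory": {0: "40GiB"}},
        torch.float16,
    )
)
# -> {'torch_dtype': torch.float16}
```

The practical effect of the change is that max_memory is stripped on this path for every architecture, not just DeciLMForCausalLM, presumably because the from_config-style construction used here does not accept that argument.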
