
Commit 08eb5be

Eliminate confusion about meta weights during quantization (#8487)
1 parent bc497a0

File tree

1 file changed: +10 −7 lines changed


examples/models/llama/model.py

Lines changed: 10 additions & 7 deletions
@@ -244,13 +244,16 @@ def __init__(self, **kwargs):
             strict=False,
             assign=True,
         ) # self.model_ = Transformer(gptconf)
-        if kwargs.get("verbose", False):
-            print("============= missing keys ================")
-            print(missing)
-            print("============= /missing ================")
-            print("============= unexpected keys ================")
-            print(unexpected)
-            print("============= /unexpected ================")
+
+        if missing:
+            missing_weights = [fqn for fqn in missing if fqn.endswith(".weight")]
+            if missing_weights:
+                raise ValueError(
+                    f"The provided checkpoint is missing the following weights that are expected by the model: {missing_weights}. Please fix the fqn's in your checkpoint to match."
+                )
+        if unexpected:
+            if kwargs.get("verbose", False):
+                print(f"Unexpected keys: {unexpected}")
 
         # Prune the input layer if input_prune_map is provided
         if input_prune_map is not None:
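
For reference, a minimal sketch (standard PyTorch only; the module and checkpoint names are illustrative, not from this commit) of the behavior the new check builds on: load_state_dict with strict=False returns the missing and unexpected fully-qualified key names instead of raising, and the commit filters the missing list for entries ending in ".weight":

# Minimal sketch, assuming only standard PyTorch; module/checkpoint
# names are hypothetical, not taken from the commit.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 2))   # state_dict keys: "0.weight", "0.bias"
checkpoint = {"0.bias": torch.zeros(2)}  # "0.weight" deliberately omitted

# With strict=False, load_state_dict does not raise on mismatched keys;
# it returns the missing and unexpected key names.
missing, unexpected = model.load_state_dict(checkpoint, strict=False)

# Same filtering as the commit: only missing *.weight entries are fatal.
missing_weights = [fqn for fqn in missing if fqn.endswith(".weight")]
if missing_weights:
    raise ValueError(
        f"The provided checkpoint is missing the following weights: {missing_weights}"
    )

This fails fast on genuinely missing weight tensors, rather than printing verbose key dumps and leaving them silently uninitialized.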
