
Commit b4e1a42

buyukakyuzmreso authored and committed
Freeze layer bug fix
1 parent a9b9ebf commit b4e1a42

1 file changed: 2 additions, 3 deletions

src/llama_recipes/finetuning.py

@@ -166,8 +166,7 @@ def main(**kwargs):
     #setting up FSDP if enable_fsdp is enabled
     if train_config.enable_fsdp:
         if not train_config.use_peft and train_config.freeze_layers:
-
-            freeze_transformer_layers(train_config.num_freeze_layers)
+            freeze_transformer_layers(model, train_config.num_freeze_layers)
 
         mixed_precision_policy, wrapping_policy = get_policies(fsdp_config, rank)
         my_auto_wrapping_policy = fsdp_auto_wrap_policy(model, LlamaDecoderLayer)
@@ -217,7 +216,7 @@ def main(**kwargs):
         split="test",
     )
     if not train_config.enable_fsdp or rank == 0:
-            print(f"--> Validation Set Length = {len(dataset_val)}")
+        print(f"--> Validation Set Length = {len(dataset_val)}")
 
     if train_config.batching_strategy == "packing":
         dataset_train = ConcatDataset(dataset_train, chunk_size=train_config.context_length)
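
For context, here is a minimal sketch of what a freeze_transformer_layers(model, num_layer) helper typically does for a Hugging Face Llama-style model. This is an illustration, not the repository's exact implementation, and it assumes the decoder stack is reachable at model.model.layers:

# Hedged sketch (not the repository's exact code): disable gradients for the
# first `num_layer` decoder layers so fine-tuning only updates the rest.
# Assumes a transformers LlamaForCausalLM-style model whose decoder stack
# is reachable at `model.model.layers`.
def freeze_transformer_layers(model, num_layer):
    for i, layer in enumerate(model.model.layers):
        if i < num_layer:
            for param in layer.parameters():
                param.requires_grad = False

With a two-argument signature like this, the old call freeze_transformer_layers(train_config.num_freeze_layers) supplies no model at all, so the helper has nothing to freeze; passing model explicitly, as this commit does, is presumably the fix the commit message refers to.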
