Merged
2 changes: 1 addition & 1 deletion examples/alora_finetuning/README.md
@@ -32,7 +32,7 @@ trainer = Trainer(
     model=peft_model,
     train_dataset=dataset,
     dataset_text_field="text",
-    max_seq_length=2048,
+    max_length=2048,
     tokenizer=tokenizer,
     data_collator=data_collator,
 )
2 changes: 1 addition & 1 deletion examples/bone_finetuning/README.md
@@ -28,7 +28,7 @@ peft_model.print_trainable_parameters()
 
 dataset = load_dataset("imdb", split="train[:1%]")
 
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     args=training_args,
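
For reference, the sketch below shows the post-rename SFT setup these READMEs converge on. It is a minimal example assuming a recent TRL release in which `SFTConfig`'s `max_seq_length` argument was renamed to `max_length` (the rename this PR tracks); the model id is an illustrative placeholder.

```python
# Minimal sketch of the renamed argument (assumes a recent TRL release).
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("imdb", split="train[:1%]")

# `max_length` is the new name for what older TRL versions called `max_seq_length`.
training_args = SFTConfig(dataset_text_field="text", max_length=128)

trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B",  # placeholder model id; any causal LM works
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
```

The same one-line rename repeats across the remaining files below.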
2 changes: 1 addition & 1 deletion examples/corda_finetuning/README.md
@@ -109,7 +109,7 @@ preprocess_corda(model, lora_config, run_model=run_model)
 peft_model = get_peft_model(model, lora_config)
 peft_model.print_trainable_parameters()
 
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     args=training_args,
6 changes: 3 additions & 3 deletions examples/delora_finetuning/README.md
@@ -26,7 +26,7 @@ peft_model.print_trainable_parameters()
 
 dataset = load_dataset("imdb", split="train[:1%]")
 
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     args=training_args,
@@ -52,7 +52,7 @@ peft_model = PeftModel.from_pretrained(model, "delora-llama-3-8b")
 ## Advanced Usage
 In this script the default DeLoRA layers are the query and value layers of the Llama model. Adding adapters on more layers will increase memory usage. If you wish to choose a different set of layers for DeLoRA to be applied on, you can simply define it using:
 ```bash
-python examples/delora_finetuning/delora_finetuning.py --base_model meta-llama/Meta-Llama-3-8B --delora_target_modules "q_proj,k_proj,v_proj,o_proj"
+python examples/delora_finetuning/delora_finetuning.py --base_model meta-llama/Meta-Llama-3-8B --target_modules "q_proj,k_proj,v_proj,o_proj"
 ```
 
 Using different lambdas for different layers is also possible by setting `lambda_pattern`.
@@ -74,7 +74,7 @@ python delora_finetuning.py \
     --rank 32 \
     --delora_lambda 15 \
     --module_dropout 0.1 \
-    --delora_target_modules "q_proj,v_proj" \
+    --target_modules "q_proj,v_proj" \
     --hub_model_id "YOUR_HF_REPO" \
     --push_to_hub
 ```
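
The context above mentions `lambda_pattern` without showing it. Below is a hedged sketch of per-layer lambdas; it assumes the PEFT config class is named `DeloraConfig` and that `lambda_pattern` maps module-name patterns to lambda values, mirroring LoRA's `rank_pattern`/`alpha_pattern` convention — neither of which this diff confirms.

```python
# Hedged sketch: per-layer lambda overrides via `lambda_pattern`.
# Assumptions (not confirmed by the diff): the config class is `DeloraConfig`,
# and `lambda_pattern` maps module-name regexes to lambda values, mirroring
# LoRA's `rank_pattern`/`alpha_pattern` convention.
from transformers import AutoModelForCausalLM
from peft import DeloraConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
config = DeloraConfig(
    r=32,
    delora_lambda=15,                     # default lambda for all targeted layers
    target_modules=["q_proj", "v_proj"],
    lambda_pattern={r".*layers\.0\.self_attn\.q_proj": 30},  # override one layer
)
peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()
```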
2 changes: 1 addition & 1 deletion examples/dora_finetuning/README.md
@@ -24,7 +24,7 @@ trainer = transformers.Trainer(
     model=peft_model,
     train_dataset=dataset,
     dataset_text_field="text",
-    max_seq_length=2048,
+    max_length=2048,
     tokenizer=tokenizer,
 )
 trainer.train()
2 changes: 1 addition & 1 deletion examples/lorafa_finetune/README.md
@@ -40,7 +40,7 @@ trainer = transformers.Trainer(
     model=peft_model,
     train_dataset=dataset,
     dataset_text_field="text",
-    max_seq_length=2048,
+    max_length=2048,
     processing_class=tokenizer,
     optimizers=(optimizer, None),
 )
2 changes: 1 addition & 1 deletion examples/miss_finetuning/README.md
@@ -36,7 +36,7 @@ peft_model.print_trainable_parameters()
 
 dataset = load_dataset("imdb", split="train[:1%]")
 
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     args=training_args,
2 changes: 1 addition & 1 deletion examples/olora_finetuning/README.md
@@ -18,7 +18,7 @@ lora_config = LoraConfig(
     init_lora_weights="olora"
 )
 peft_model = get_peft_model(model, lora_config)
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     train_dataset=dataset,
2 changes: 1 addition & 1 deletion examples/pissa_finetuning/README.md
@@ -23,7 +23,7 @@ peft_model.print_trainable_parameters()
 
 dataset = load_dataset("imdb", split="train[:1%]")
 
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     args=training_args,
2 changes: 1 addition & 1 deletion examples/randlora_finetuning/README.md
@@ -20,7 +20,7 @@ trainer = transformers.Trainer(
     model=peft_model,
     train_dataset=dataset,
     dataset_text_field="text",
-    max_seq_length=2048,
+    max_length=2048,
     processing_class=tokenizer,
 )
 trainer.train()
2 changes: 1 addition & 1 deletion examples/road_finetuning/README.md
@@ -26,7 +26,7 @@ trainer = transformers.Trainer(
     model=peft_model,
     train_dataset=dataset,
     dataset_text_field="text",
-    max_seq_length=2048,
+    max_length=2048,
     tokenizer=tokenizer,
 )
 trainer.train()
2 changes: 1 addition & 1 deletion examples/shira_finetuning/README.md
@@ -18,7 +18,7 @@ shira_config = ShiraConfig(
     r=32,
 )
 peft_model = get_peft_model(model, shira_config)
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     train_dataset=dataset,
2 changes: 1 addition & 1 deletion examples/waveft_finetuning/README.md
@@ -20,7 +20,7 @@ waveft_config = WaveFTConfig(
     n_frequency=2592,
 )
 peft_model = get_peft_model(model, waveft_config)
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     train_dataset=dataset,