
Commit a6cac5d

Update lr_scheduler options in config.qmd to include additional scheduling strategies for improved training flexibility. (axolotl-ai-cloud#2636) [skip ci]
1 parent b71c0e3 commit a6cac5d

File tree

1 file changed (+1, -1)


docs/config.qmd

Lines changed: 1 addition & 1 deletion
@@ -547,7 +547,7 @@ gradient_checkpointing: false
 early_stopping_patience: 3

 # Specify a scheduler and kwargs to use with the optimizer
-lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine
+lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | 'linear' | 'cosine_with_restarts' | 'polynomial' | 'constant' | 'constant_with_warmup' | 'inverse_sqrt' | 'reduce_lr_on_plateau' | 'cosine_with_min_lr' | 'warmup_stable_decay' | empty for cosine
 lr_scheduler_kwargs:
 cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr
 cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)
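For context, a minimal sketch of pairing one of the newly documented schedulers with lr_scheduler_kwargs. The value shown is hypothetical, and this assumes the kwargs are forwarded to the underlying Hugging Face scheduler, where cosine_with_min_lr accepts a min_lr_rate:

 # sketch, not from the commit: decay to 10% of peak lr with a cosine schedule
 lr_scheduler: cosine_with_min_lr
 lr_scheduler_kwargs:
   min_lr_rate: 0.1  # assumption: passed through to transformers' cosine_with_min_lr scheduler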
