# The path to the base model's checkpoint directory to load for finetuning. (type: <class 'Path'>, default: checkpoints/stabilityai/stablelm-base-alpha-3b)
checkpoint_dir: checkpoints/unsloth/Mistral-7B-v0.2
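
# Note (editorial assumption, not part of the original config): the base checkpoint
# must exist locally before finetuning starts. With the LitGPT CLI it can be fetched
# with something like ``litgpt download unsloth/Mistral-7B-v0.2``; the exact command
# and flag syntax vary across LitGPT versions.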

# Directory in which to save checkpoints and logs. (type: <class 'Path'>, default: out/lora)
out_dir: out/finetune/qlora-mistral-7b

# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
precision: bf16-true

# If set, quantize the model with this algorithm. See ``tutorials/quantize.md`` for more information. (type: Optional[Literal['bnb.nf4', 'bnb.nf4-dq', 'bnb.fp4', 'bnb.fp4-dq', 'bnb.int8-training']], default: null)
quantize: bnb.nf4
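
# Note (editorial): ``bnb.nf4`` is the bitsandbytes NF4 scheme used by QLoRA. The frozen
# base weights are stored in 4-bit while the trainable LoRA adapters use the precision
# configured above; this requires the ``bitsandbytes`` package to be installed.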

# How many devices/GPUs to use. (type: Union[int, str], default: 1)
devices: 1

# The LoRA rank. (type: int, default: 8)
lora_r: 32

# The LoRA alpha. (type: int, default: 16)
lora_alpha: 16
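
# Note (editorial): LoRA scales the adapter update by alpha / r, so with alpha=16 and
# r=32 the effective scaling here is 16 / 32 = 0.5.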

# The LoRA dropout value. (type: float, default: 0.05)
lora_dropout: 0.05

# Whether to apply LoRA to the query weights in attention. (type: bool, default: True)
lora_query: true

# Whether to apply LoRA to the key weights in attention. (type: bool, default: False)
lora_key: false

# Whether to apply LoRA to the value weights in attention. (type: bool, default: True)
lora_value: true

# Whether to apply LoRA to the output projection in the attention block. (type: bool, default: False)
lora_projection: false

# Whether to apply LoRA to the weights of the MLP in each transformer block. (type: bool, default: False)
lora_mlp: false

# Whether to apply LoRA to the output head in GPT. (type: bool, default: False)
lora_head: false

# Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``.
data:
  class_path: litgpt.data.Alpaca2k
  init_args:
    mask_prompt: false
    val_split_fraction: 0.05
    prompt_style: alpaca
    ignore_index: -100
    seed: 42
    num_workers: 4
    download_dir: data/alpaca2k
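
  # Note (editorial assumption): as the name suggests, ``Alpaca2k`` is a ~2k-sample
  # subset of the Alpaca instruction dataset. With ``mask_prompt: false`` the loss is
  # computed on prompt tokens as well as response tokens; set it to true to train on
  # responses only.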

# Training-related arguments. See ``litgpt.args.TrainArgs`` for details
train:

  # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
  save_interval: 200

  # Number of iterations between logging calls (type: int, default: 1)
  log_interval: 1

  # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 128)
  global_batch_size: 8

  # Number of samples per data-parallel rank (type: int, default: 4)
  micro_batch_size: 2
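
  # Note (editorial): gradients are accumulated over
  # global_batch_size / (micro_batch_size * devices) = 8 / (2 * 1) = 4 micro-batches
  # per optimizer step.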

  # Number of iterations with learning rate warmup active (type: int, default: 100)
  lr_warmup_steps: 10

  # Number of epochs to train on (type: Optional[int], default: 5)
  epochs: 4

  # Total number of tokens to train on (type: Optional[int], default: null)
  max_tokens:

  # Limits the number of optimizer steps to run (type: Optional[int], default: null)
  max_steps:

  # Limits the length of samples (type: Optional[int], default: null)
  max_seq_length: 512

  # Whether to tie the embedding weights with the language modeling head weights (type: Optional[bool], default: null)
  tie_embeddings:

  # Learning rate (type: float, default: 0.0003)
  learning_rate: 0.0002

  # Weight decay (type: float, default: 0.02)
  weight_decay: 0.0

  # Optimizer beta1 (type: float, default: 0.9)
  beta1: 0.9

  # Optimizer beta2 (type: float, default: 0.95)
  beta2: 0.95

  # Maximum gradient norm for clipping; no clipping when null (type: Optional[float], default: null)
  max_norm:

  # Minimum learning rate reached by the decay schedule (type: float, default: 6e-05)
  min_lr: 6.0e-05

# Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
eval:

  # Number of optimizer steps between evaluation calls (type: int, default: 100)
  interval: 100

  # Number of tokens to generate (type: Optional[int], default: 100)
  max_new_tokens: 100

  # Number of evaluation iterations (type: int, default: 100)
  max_iters: 100

# The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: csv)
logger_name: csv

# The random seed to use for reproducibility. (type: int, default: 1337)
seed: 1337
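
# Usage sketch (editorial assumption; the command name and flags depend on the installed
# LitGPT version, and the config filename below is hypothetical):
#   litgpt finetune lora --config path/to/qlora-mistral-7b.yaml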