# Bradley-Terry (BT) Reward Model Training Configuration
rm:
  ## The total number of training steps equals
  ## min(max_num_epochs * len(train_dataloader), max_num_steps).
  max_num_epochs: 1
  max_num_steps: -1  # -1 disables the step cap, so training runs for max_num_epochs

  val_period: 16
  val_batches: -1
  val_global_batch_size: 32
  val_micro_batch_size: 1
  val_at_start: false
  seed: 42

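## Illustrative arithmetic (hypothetical dataset size, not a property of this
## config): with 128,000 preference pairs and train_global_batch_size: 128,
## one epoch is 1,000 optimizer steps, so max_num_epochs: 1 with
## max_num_steps: -1 trains for 1,000 steps and validates every 16 (val_period).
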
checkpointing:
  enabled: true
  checkpoint_dir: "results/rm"
  metric_name: "val_loss"
  higher_is_better: false
  keep_top_k: 3
  save_period: ${rm.val_period}

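## With metric_name: "val_loss" and higher_is_better: false, the checkpointer
## ranks checkpoints by ascending validation loss and keeps only the 3 best
## (keep_top_k). save_period follows rm.val_period, so saves land on steps
## with a freshly computed validation metric.
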
policy:
  model_name: "meta-llama/Llama-3.2-1B-Instruct"
  tokenizer:
    name: ${policy.model_name}  ## specify only if you'd like a tokenizer different from the model's default
    # We don't use the "default" chat template because the Llama tokenizer inserts the current
    # date in the system prompt, which could make the reward model's output date-dependent.
    chat_template: "{{- bos_token }}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = '' %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\n\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}\n{%- endif %}"
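    ## A minimal sketch (hypothetical snippet, not part of this config) of how
    ## a custom template like the one above is applied with Hugging Face
    ## `transformers`; `messages` is a list of {"role", "content"} dicts:
    ##   from transformers import AutoTokenizer
    ##   tok = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")
    ##   tok.chat_template = CHAT_TEMPLATE  # the Jinja string above
    ##   text = tok.apply_chat_template(messages, tokenize=False)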
  train_global_batch_size: 128
  train_micro_batch_size: 1
  max_total_sequence_length: 8192
  precision: "bfloat16"
  fsdp_offload_enabled: false
  activation_checkpointing_enabled: false

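  ## With train_global_batch_size: 128, train_micro_batch_size: 1, and a single
  ## GPU (see the cluster section below), each optimizer step accumulates
  ## gradients over 128 / (1 * 1) = 128 micro-batches; adding data-parallel
  ## ranks shrinks that accumulation factor proportionally.
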
  reward_model_cfg:
    enabled: true  # loads model as a Reward Model (do not change)
    reward_model_type: "bradley_terry"  # only "bradley_terry" is currently supported

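  ## A minimal sketch of the Bradley-Terry objective this mode trains (PyTorch
  ## pseudocode; the variable names are illustrative, not this repo's API):
  ##   import torch.nn.functional as F
  ##   # r_chosen, r_rejected: scalar rewards from the model's reward head
  ##   loss = -F.logsigmoid(r_chosen - r_rejected).mean()
  ## i.e. maximize the log-probability that the chosen response outranks the
  ## rejected one under the Bradley-Terry preference model.
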
  dtensor_cfg:
    enabled: true
    cpu_offload: false
    sequence_parallel: false
    activation_checkpointing: false
    tensor_parallel_size: 1
    context_parallel_size: 1
    custom_parallel_plan: null

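  ## The data-parallel size is derived rather than set:
  ## dp = world_size / (tensor_parallel_size * context_parallel_size).
  ## With the 1-GPU cluster below and tp = cp = 1, that gives dp = 1.
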
  dynamic_batching:
    enabled: false

  sequence_packing:
    enabled: false

  # Makes the training sequence length divisible by the tensor parallel size;
  # useful for sequence-parallel training.
  make_sequence_length_divisible_by: ${policy.dtensor_cfg.tensor_parallel_size}
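  ## Example: with tensor_parallel_size: 8, an 8190-token batch would be padded
  ## to 8192 so each rank receives an equal shard of the sequence.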
  max_grad_norm: 1.0

  optimizer:
    name: "torch.optim.AdamW"
    kwargs:
      lr: 2.0e-6
      weight_decay: 0.1
      betas: [0.9, 0.98]
      eps: 1e-5
      # When using DTensor, `foreach` and `fused` must be set to false.
      foreach: false
      fused: false

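  ## The optimizer is referenced by dotted path; a common way such a string is
  ## resolved (illustrative sketch, not necessarily this repo's loader):
  ##   import importlib
  ##   mod_name, cls_name = "torch.optim.AdamW".rsplit(".", 1)
  ##   OptCls = getattr(importlib.import_module(mod_name), cls_name)
  ##   optimizer = OptCls(model.parameters(), lr=2.0e-6, weight_decay=0.1,
  ##                      betas=(0.9, 0.98), eps=1e-5, foreach=False, fused=False)
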
  ## Ignored since enabled=false, but kept for testing purposes.
  megatron_cfg:
    enabled: false
    empty_unused_memory_level: 1
    activation_checkpointing: false
    tensor_model_parallel_size: 2
    pipeline_model_parallel_size: 2
    context_parallel_size: 1
    pipeline_dtype: ${policy.precision}
    num_layers_in_first_pipeline_stage: null
    num_layers_in_last_pipeline_stage: null
    sequence_parallel: false

    optimizer:
      optimizer: "adam"
      lr: 2.0e-6
      min_lr: 1.9999e-6
      weight_decay: 0.1
      bf16: false
      fp16: false
      params_dtype: "float32"

      # Adam
      adam_beta1: 0.9
      adam_beta2: 0.98
      adam_eps: 1e-5

      # SGD
      sgd_momentum: 0.9

      # Distributed optimizer
      use_distributed_optimizer: true
      use_precision_aware_optimizer: true

      clip_grad: ${policy.max_grad_norm}

    scheduler:
      start_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay}
      end_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay}
      weight_decay_incr_style: "constant"
      lr_decay_style: "constant"
      lr_decay_iters: null
      lr_warmup_iters: 50
      lr_warmup_init: 1.9999e-6
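      ## Net effect: the LR warms up from lr_warmup_init (1.9999e-6) to lr
      ## (2.0e-6) over the first 50 iterations, then holds constant
      ## (lr_decay_style: "constant"); weight decay is likewise fixed at 0.1.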

    distributed_data_parallel_config:
      grad_reduce_in_fp32: false
      overlap_grad_reduce: true
      overlap_param_gather: false
      average_in_collective: true
      data_parallel_sharding_strategy: "optim_grads_params"
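      ## "optim_grads_params" shards optimizer state, gradients, and parameters
      ## across data-parallel ranks, the most memory-efficient of Megatron's
      ## sharding strategies (comparable to ZeRO-3).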
data:
  max_input_seq_length: ${policy.max_total_sequence_length}
  dataset_name: "HelpSteer3"

logger:
  log_dir: "logs"  # base directory for all logs
  wandb_enabled: true  # make sure you run `wandb login [Your API key]` before running
  tensorboard_enabled: true
  mlflow_enabled: false
  monitor_gpus: true  # if true, monitors GPU usage and logs to wandb and/or tensorboard
  wandb:
    project: "rm-dev"
    name: "rm-dev-${data.dataset_name}"
  tensorboard:
    log_dir: "tb_logs-rm-dev-${data.dataset_name}"
  gpu_monitoring:
    collection_interval: 10  # how often to collect GPU usage metrics (in seconds)
    flush_interval: 10  # how often to flush GPU usage metrics to the loggers (in seconds)

cluster:
  gpus_per_node: 1
  num_nodes: 1
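## Total world size = gpus_per_node * num_nodes = 1 GPU here; scale both up
## (and revisit the parallelism settings above) for larger models.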