forked from PaddlePaddle/PaddleFormers
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdsv3_32k_config.yaml
More file actions
84 lines (76 loc) · 1.9 KB
/
dsv3_32k_config.yaml
File metadata and controls
84 lines (76 loc) · 1.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
---
# SFT fine-tuning configuration for DeepSeek-V3 (bf16) at 32k sequence length.
# Parallelism layout: TP=8, PP=8, sharding=2 (stage1), EP=16, full recompute.

### data
train_dataset_type: erniekit
eval_dataset_type: erniekit
train_dataset_path: "/root/train.json"  # dataset storage path
train_dataset_prob: "1.0"
eval_dataset_path: "/root/eval.json"  # dataset storage path
eval_dataset_prob: "1.0"
max_seq_len: 32768
num_samples_each_epoch: 6000000
packing: true
### model
model_name_or_path: "/root/huggingface_model/DeepSeek-V3-bf16/"  # model storage path
convert_from_hf: true
### finetuning
# base
stage: SFT
fine_tuning: full
do_train: true
do_eval: false
per_device_eval_batch_size: 1
per_device_train_batch_size: 1
num_train_epochs: 1
num_nextn_predict_layers: 1
max_steps: 100
# quoted: a bare `no` is parsed as boolean false by YAML 1.1 loaders (e.g. PyYAML),
# but the trainer expects the literal string "no" for its strategy enums
evaluation_strategy: "no"
save_steps: 100
save_total_limit: 1
save_strategy: "no"  # quoted for the same YAML-1.1 truthy reason as evaluation_strategy
logging_steps: 1
gradient_accumulation_steps: 16
logging_dir: ./vdl_log
output_dir: ./checkpoints/dsv3
disable_tqdm: true
eval_accumulation_steps: 1
load_best_model_at_end: false
eval_with_do_generation: false
metric_for_best_model: "loss"
hybrid_parallel_topo_order: "sharding_first"
unified_checkpoint: true
unified_checkpoint_config: "ignore_merge_optimizer"
# train
warmup_steps: 30
learning_rate: 7e-06
continue_training: true
# performance
tensor_model_parallel_size: 8
sequence_parallel: true
pipeline_model_parallel_size: 8
sharding_parallel_size: 2
use_expert_parallel: true
expert_model_parallel_size: 16
# tensor_parallel_config
tp_delay_scale_loss: true
tp_sync_param: true
sync_grad: true
# pipeline_parallel_config
pp_delay_scale_loss: true
partial_send_recv: false
batch_p2p_comm: false
# sharding_parallel_config
split_param: true
recompute_granularity: full
recompute_method: uniform
recompute_num_layers: 1
recompute_use_reentrant: true
sharding: stage1
bf16: true
amp_master_grad: true
fp16_opt_level: O2
use_flash_attention: true
use_attn_mask_startend_row_indices: true
using_fake_gate: false
pre_alloc_memory: 60
tensorwise_offload_optimizer: true
fuse_rms_norm: true
moe_subbatch_token_num_before_dispatch: 0