#!/bin/bash

# Ascend NPU / HCCL runtime environment settings
export HCCL_CONNECT_TIMEOUT=1800
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NPU_ASD_ENABLE=0
export HCCL_BUFFSIZE=64
export CPU_AFFINITY_CONF=1,lazy_bind:0

# Distributed launch configuration (single node, 8 NPUs)
NPUS_PER_NODE=8
MASTER_ADDR=localhost
MASTER_PORT=6006
NNODES=1
NODE_RANK=0
WORLD_SIZE=$(($NPUS_PER_NODE*$NNODES))

# Please fill in these path configurations (hypothetical examples below)
CKPT_SAVE_DIR="your model save ckpt path"
DATA_PATH="your data path"
TOKENIZER_MODEL="your tokenizer path"
CKPT_LOAD_DIR="your model ckpt path"
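
# Hypothetical example values (not part of the original script; adapt to your setup):
#   CKPT_SAVE_DIR="./ckpt/plm-1.8b"
#   DATA_PATH="./dataset/plm/pretrain_text_document"
#   TOKENIZER_MODEL="./model_from_hf/plm-1.8b-hf/"
#   CKPT_LOAD_DIR="./model_weights/plm-1.8b-mcore/"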

# Model parallelism sizes (tensor parallel / pipeline parallel)
TP=1
PP=1
DISTRIBUTED_ARGS="
--nproc_per_node $NPUS_PER_NODE \
--nnodes $NNODES \
--node_rank $NODE_RANK \
--master_addr $MASTER_ADDR \
--master_port $MASTER_PORT
"
MLA_ARGS="
--spec mindspeed_llm.tasks.models.spec.plm_spec layer_spec \
--multi-latent-attention \
--qk-pos-emb-head-dim 64 \
--qk-head-dim 128 \
--kv-lora-rank 512 \
--v-head-dim 128 \
"
ROPE_ARGS="
--rope-scaling-type plm
"
GPT_ARGS="
--load $CKPT_LOAD_DIR \
--use-distributed-optimizer \
--overlap-grad-reduce \
--overlap-param-gather \
--use-flash-attn \
--use-mcore-models \
--tensor-model-parallel-size ${TP} \
--pipeline-model-parallel-size ${PP} \
--sequence-parallel \
--num-layers 32 \
--hidden-size 2048 \
--ffn-hidden-size 8192 \
--num-attention-heads 16 \
--num-query-groups 16 \
--tokenizer-type PretrainedFromHF \
--tokenizer-name-or-path ${TOKENIZER_MODEL} \
--seq-length 4096 \
--max-position-embeddings 4096 \
--micro-batch-size 2 \
--global-batch-size 16 \
--make-vocab-size-divisible-by 1 \
--lr 1.25e-06 \
--train-iters 2000 \
--lr-decay-style constant \
--lr-decay-iters 2000 \
--disable-bias-linear \
--attention-dropout 0 \
--init-method-std 0.008 \
--hidden-dropout 0 \
--position-embedding-type rope \
--rotary-base 100000 \
--normalization RMSNorm \
--use-fused-rotary-pos-emb \
--use-rotary-position-embeddings \
--use-fused-rmsnorm \
--squared-relu \
--no-masked-softmax-fusion \
--attention-softmax-in-fp32 \
--weight-decay 0.1 \
--clip-grad 1.0 \
--adam-beta1 0.9 \
--adam-beta2 0.999 \
--initial-loss-scale 1 \
--vocab-size 151936 \
--padded-vocab-size 151936 \
--no-gradient-accumulation-fusion \
--norm-epsilon 1e-6 \
--no-load-optim \
--no-load-rng \
--bf16
"
DATA_ARGS="
--data-path $DATA_PATH \
--split 100,0,0
"
OUTPUT_ARGS="
--log-interval 1 \
--save-interval 2000 \
--eval-interval 1000 \
--eval-iters 0 \
--no-save-optim \
--no-save-rng
"

mkdir -p ./logs  # ensure the log directory exists before tee writes to it
torchrun $DISTRIBUTED_ARGS pretrain_gpt.py \
$GPT_ARGS \
$DATA_ARGS \
$OUTPUT_ARGS \
$MLA_ARGS \
$ROPE_ARGS \
--distributed-backend nccl \
--save $CKPT_SAVE_DIR \
--transformer-impl local \
| tee ./logs/train_plm_1point8b.log