Skip to content

Commit bc8df6e

Browse files
authored
[CI] fix llama (#6720)
* [ci] fix llama
* update
1 parent 4dfec25 commit bc8df6e

File tree

2 files changed

+2
-57
lines changed

2 files changed

+2
-57
lines changed

scripts/regression/ci_case.sh

Lines changed: 0 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -1198,59 +1198,4 @@ gpt-3() {
11981198
bash ${nlp_dir}/scripts/regression/ci_gpt-3.sh
11991199
print_info $? `ls -lt ${log_path} | grep gpt | head -n 1 | awk '{print $9}'`
12001200
}
1201-
llama(){
1202-
cd ${nlp_dir}/examples/language_model/llama/
1203-
# lora tuning
1204-
python -u -m paddle.distributed.fleet.launch finetune_generation.py \
1205-
--output_dir ./checkpoints/ \
1206-
--per_device_train_batch_size 2 \
1207-
--gradient_accumulation_steps 2 \
1208-
--per_device_eval_batch_size 4 \
1209-
--model_name_or_path "__internal_testing__/micro-random-llama" \
1210-
--task_name squad \
1211-
--warmup_steps 30 \
1212-
--logging_steps 1 \
1213-
--max_steps 1 \
1214-
--save_steps 1 \
1215-
--evaluation_strategy epoch \
1216-
--save_strategy epoch \
1217-
--src_length 1024 \
1218-
--tgt_length 1024 \
1219-
--fp16 \
1220-
--fp16_opt_level O2 \
1221-
--do_train \
1222-
--disable_tqdm True \
1223-
--load_best_model_at_end True \
1224-
--metric_for_best_model accuracy \
1225-
--eval_with_do_generation False \
1226-
--recompute \
1227-
--save_total_limit 1 \
1228-
--overwrite_output_dir >${log_path}/llama_finetune 2>&1
1229-
print_info $? llama_finetune
1230-
}
1231-
bloom(){
1232-
cd ${nlp_dir}examples/language_model/bloom
1233-
python -m paddle.distributed.launch finetune_generation.py \
1234-
--model_name_or_path bigscience/bloom-560m \
1235-
--task_name_or_path "dureader_qg" \
1236-
--output_dir ./checkpoints/bloom-560m \
1237-
--per_device_train_batch_size 2 \
1238-
--gradient_accumulation_steps 2 \
1239-
--per_device_eval_batch_size 4 \
1240-
--logging_steps 1 \
1241-
--max_steps 1 \
1242-
--save_steps 1 \
1243-
--evaluation_strategy epoch \
1244-
--save_strategy epoch \
1245-
--tensor_parallel_degree 2 \
1246-
--recompute \
1247-
--save_total_limit 1 \
1248-
--scale_loss 32768 \
1249-
--overwrite_output_dir
1250-
}
1251-
refactor_training_loop(){
1252-
llama
1253-
gpt
1254-
transformers
1255-
}
12561201
$1

scripts/regression/run_ci.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,8 +31,8 @@ declare -A Build_list
3131
all_P0case_dic=(["waybill_ie"]=3 ["msra_ner"]=15 ["glue"]=2 ["bert"]=2 ["skep"]=10 ["bigbird"]=2 ["electra"]=2 ["gpt"]=2 ["ernie-1.0"]=2 ["xlnet"]=2 \
3232
["ofa"]=2 ["albert"]=2 ["SQuAD"]=20 ["lexical_analysis"]=5 ["seq2seq"]=5 ["word_embedding"]=5 \
3333
["ernie-ctm"]=5 ["distilbert"]=5 ["transformer"]=5 ["pet"]=5 ["efl"]=5 ["p-tuning"]=5 ["ernie-doc"]=20 ["transformer-xl"]=5 \
34-
["question_matching"]=5 ["ernie-csc"]=5 ["nptag"]=5 ["ernie-m"]=5 ["taskflow"]=5 ["clue"]=5 ["textcnn"]=5 ["transformers"]=20 \
35-
["fast_generation"]=10 ["ernie-3.0"]=5 ["ernie-layout"]=5 ["uie"]=5 ["ernie-health"]=5 ["llama"]=5 \
34+
["question_matching"]=5 ["ernie-csc"]=5 ["nptag"]=5 ["ernie-m"]=5 ["taskflow"]=5 ["clue"]=5 ["textcnn"]=5 \
35+
["fast_generation"]=10 ["ernie-3.0"]=5 ["ernie-layout"]=5 ["uie"]=5 ["ernie-health"]=5 \
3636
["ernie"]=2 ["ernie_m"]=5 ["ernie_layout"]=5 ["ernie_csc"]=5 ["ernie_ctm"]=5 ["ernie_doc"]=20 ["ernie_health"]=5 ["gpt-3"]=5)
3737
####################################
3838
# Insatll paddlepaddle-gpu

0 commit comments

Comments (0)