Skip to content

Commit 7ab35ce

Browse files
authored
Align reduce precision with PyTorch 2.9.1, disable precision checks. (#11195)
1 parent bc08aee commit 7ab35ce

File tree

2 files changed

+10
-10
lines changed

2 files changed

+10
-10
lines changed

scripts/distribute/ci_case_auto.sh

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -107,27 +107,27 @@ function llama_case_list_auto() {
107107
# The test name must have "llama_" as a prefix, which will
108108
# be used for tracking the execution status of the case.
109109
llama_dygraph_auto_bs4_bf16_SD2
110-
llama_dygraph_auto_bs8_fp32_DP2
111-
llama_dygraph_auto_bs8_fp32_DP2-MP2
110+
# llama_dygraph_auto_bs8_fp32_DP2
111+
# llama_dygraph_auto_bs8_fp32_DP2-MP2
112112
llama_dygraph_auto_bs8_fp32_DP2-MP2-PP2
113-
llama_dygraph_auto_bs8_fp16_DP2-MP2-PP2
113+
# llama_dygraph_auto_bs8_fp16_DP2-MP2-PP2
114114
llama_dygraph_auto_bs8_fp16_DP2-MP2-CP2
115115
#llama_dygraph_auto_bs8_fp16_DP2-MP2-CP2_intermediate
116116
llama_dygraph_auto_bs8_fp16_DP2-MP2-PP2_hybrid_pp
117117
# llama_dygraph_auto_bs8_fp16_DP2-MP2-PP2_intermediate
118118
llama_dy2st_auto_bs4_bf16_DP1-MP1-PP4-SD2-VPP3_split_bw
119119
llama_dy2st_auto_bs4_bf16_DP1-MP1-PP4-SD2
120-
llama_align_dygraph_dy2st_auto_bs2_bf16_DP2-MP1-PP1
121-
llama_pir_auto_fuse_ffn_attention_qkv_MP2
120+
# llama_align_dygraph_dy2st_auto_bs2_bf16_DP2-MP1-PP1
121+
# llama_pir_auto_fuse_ffn_attention_qkv_MP2
122122
# llama_convert_hybrid_ckpt_to_auto_parallel_bs2_fp32_DP2-MP1-PP1
123123
llama_align_dygraph_dy2st_pir_auto_bs2_bf16_DP2-MP2-PP1-SP
124-
llama_align_dygraph_dy2st_pir_auto_bs2_bf16_DP2-MP2-PP2-SP
124+
# llama_align_dygraph_dy2st_pir_auto_bs2_bf16_DP2-MP2-PP2-SP
125125
llama_align_dygraph_dy2st_pir_auto_grad_merge_bs2_fp32_DP1-MP1-PP1
126-
llama_align_dy2st_fthenb_and_vpp_auto_bs2_fp32_DP1-MP1-PP4
126+
# llama_align_dy2st_fthenb_and_vpp_auto_bs2_fp32_DP1-MP1-PP4
127127
llama_align_dygraph_dy2st_pir_auto_pp_bs2_bf16_DP1-MP1-PP4
128128
llama_baichuan_pir_auto_fuse_ffn_attention_qkv_DP2_MP2_PP2
129129
# llama_baichuan_pir_auto_fuse_ffn_attention_qkv_DP2_MP2_PP2_intermediate
130-
llama_dy2st_auto_bs2_bf16_DP2-MP1-PP1-CINN
130+
# llama_dy2st_auto_bs2_bf16_DP2-MP1-PP1-CINN
131131
llama_lora_static_graph_auto_bs_2_bf16_DP2-TP2-PP1
132132
llama_dpo_dy2st_auto_bs2_bf16_MP8_intermediate
133133
llama_baichuan_dygraph_auto_sp_async_reduce_scatter_bs8_bf16_DP4-MP2-SP
@@ -171,7 +171,7 @@ function llm_gpt_case_list_auto() {
171171
fun_list=(
172172
# The test name must have "llm_gpt_dygraph_auto_" as a prefix,
173173
# which will be used for tracking the execution status of the case.
174-
llm_gpt_dygraph_auto_bs8_fp32_DP2
174+
# llm_gpt_dygraph_auto_bs8_fp32_DP2
175175
llm_gpt_dygraph_auto_bs8_fp32_DP2-MP2
176176
llm_gpt_dygraph_auto_bs8_fp32_DP2-MP2-PP2
177177
llm_gpt_dygraph_auto_bs8_fp16_DP2-MP2-PP2

scripts/distribute/ci_case_dy.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -133,7 +133,7 @@ function llm_gpt_case_list_dygraph() {
133133
fun_list=(
134134
# The test name must have "llm_gpt_" as a prefix, which will
135135
# be used for tracking the execution status of the case.
136-
llm_gpt_recompute_bs32_bf16_MP2-SD4-stage1
136+
# llm_gpt_recompute_bs32_bf16_MP2-SD4-stage1
137137
)
138138
if [ $1 = "prepare_case" ]; then
139139
restore_func $fun_list

0 commit comments

Comments
 (0)