2 files changed: 2 additions and 3 deletions.
First file (Python):

```diff
@@ -270,7 +270,7 @@ def gen_metadata_and_prepare_source_state_dict(self):
         malloc_size = 0
         for opt_state_name, opt_state_value in optimizer_state_dict.items():
             malloc_size += opt_state_value.numel() * opt_state_value.element_size()
-        malloc_size = malloc_size.numpy() / 2**20
+        malloc_size = malloc_size / 2**20
         logger.debug(f"{malloc_size} MB of GPU memory were allocated.")

         # merge sharding
```
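The removed `.numpy()` call only makes sense if `malloc_size` used to be a tensor; presumably `numel()` now returns a plain Python `int` here, so the running sum is an `int`, which has no `.numpy()` method, and dividing by `2**20` (bytes to MiB) works directly. A minimal sketch of the same byte-accounting pattern, with a hypothetical helper name and an `int()` guard for framework versions where `numel()` returns a 0-D tensor:

```python
def state_dict_size_mb(state_dict):
    """Total size of all tensors in `state_dict`, in MiB (hypothetical helper)."""
    total_bytes = 0
    for value in state_dict.values():
        # bytes = element count * bytes per element; int() guards against
        # framework versions where numel() returns a 0-D tensor rather than an int.
        total_bytes += int(value.numel()) * value.element_size()
    return total_bytes / 2**20  # bytes -> MiB
```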
```diff
@@ -555,7 +555,7 @@ def load_state_dict_and_rename(self):
         for k, v in state_dict.items():
             memory_size += v.numel() * v.element_size()

-        memory_size = memory_size.numpy() / 2**20
+        memory_size = memory_size / 2**20
         logger.debug(
             f"The current rank has finished loading the checkpoint file and has allocated {memory_size} MB of GPU memory."
         )
```
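The second hunk applies the same `int`-vs-tensor fix in `load_state_dict_and_rename`. A quick check of the sketch above, assuming PaddlePaddle is installed (shapes are illustrative):

```python
import paddle

state_dict = {
    "weight": paddle.zeros([1024, 1024], dtype="float32"),  # 4 MiB
    "bias": paddle.zeros([1024], dtype="float32"),          # 4 KiB
}
print(f"{state_dict_size_mb(state_dict):.4f} MB")  # ~4.0039
```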
Second file (shell CI case list):

```diff
@@ -95,7 +95,6 @@ function llama_case_list_auto() {
     llama_dy2st_auto_bs4_bf16_DP1-MP1-PP4-SD2
     llama_align_dygraph_dy2st_auto_bs2_bf16_DP2-MP1-PP1
     llama_pir_auto_fuse_ffn_attention_qkv_MP2
-    llama_convert_hybrid_ckpt_to_auto_parallel_bs2_fp32_DP2-MP1-PP1
     llama_align_dygraph_dy2st_pir_auto_bs2_bf16_DP2-MP2-PP1-SP
     llama_align_dygraph_dy2st_pir_auto_bs2_bf16_DP2-MP2-PP2-SP
     llama_align_dygraph_dy2st_pir_auto_grad_merge_bs2_fp32_DP1-MP1-PP1
```