Skip to content

Commit 8779d06

Browse files
committed
update training sh files
1 parent 9731039 commit 8779d06

File tree

3 files changed

+43
-5
lines changed

3 files changed

+43
-5
lines changed

run_exp_bag.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ for start_index in "${start_indices[@]}"; do
3232
--peft_method lora \
3333
--use_wm True \
3434
--seed 128 \
35-
--output_dir /data/finetuned_models/run_01_16_custom_wm_gt_seed_128_vlm_finetuning_0.2%_gt_imagined_step63_1_history_16sample_size_bag_new_answer_set_task_open-word_test2_18epoch_print_eval_metrics_3class_bag_aug_to_same_save_every_shuffle_key_no_demo_data_hist_no_start_from_$start_index \
35+
--output_dir /data/finetuned_models/run_bag_exp \
3636
--target_modules ['down_proj','multi_modal_linear','o_proj','k_proj','q_proj','gate_proj','up_proj','v_proj'] \
3737
--use_wandb \
3838
--custom_dataset.start_index $start_index

run_exp_cup.sh

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
#!/bin/bash
# Fine-tune Llama-3.2-11B-Vision-Instruct (custom mllama) on the real-cup
# latent dataset with LoRA + world model (--use_wm), sweeping over one or
# more dataset start indices.
#
# Usage: bash run_exp_cup.sh
# Requires: torchrun on PATH, GPU 1 available, dataset under realcup_data.

# Fail fast: abort on errors, unset variables, and mid-pipeline failures.
set -euo pipefail

# Array of dataset start indices to sweep over.
start_indices=(35)

# Launch one single-node, single-GPU training run per start index.
for start_index in "${start_indices[@]}"; do
  echo "Running command with start index: $start_index"
  CUDA_VISIBLE_DEVICES=1 FSDP_CPU_RAM_EFFICIENT_LOADING=1 torchrun --nnodes 1 --nproc_per_node 1 \
    recipes/quickstart/finetuning/finetuning_wm.py \
    --enable_fsdp \
    --lr 1e-4 \
    --num_epochs 10 \
    --batch_size_training 10 \
    --model_name mllama/Llama-3.2-11B-Vision-Instruct/custom \
    --dist_checkpoint_root_folder ./finetuned_model \
    --dist_checkpoint_folder fine-tuned \
    --use_fast_kernels \
    --dataset "custom_dataset" \
    --custom_dataset.test_split "test" \
    --custom_dataset.file "recipes/quickstart/finetuning/datasets/realcup_dataset_latent.py" \
    --custom_dataset.data_path "realcup_data" \
    --custom_dataset.answer_type "open-word" \
    --custom_dataset.num_images 16 \
    --custom_dataset.sample_size 16 \
    --custom_dataset.num_history_images 1 \
    --custom_dataset.imagined_steps 63 \
    --custom_dataset.latent_mode "all" \
    --run_validation True \
    --batching_strategy padding \
    --use_peft \
    --peft_method lora \
    --use_wm True \
    --output_dir "/data/finetuned_models/run_cup_exp" \
    --target_modules "['down_proj','multi_modal_linear','o_proj','k_proj','q_proj','gate_proj','up_proj','v_proj']" \
    --use_wandb \
    --custom_dataset.start_index "$start_index"
done
39+

run_exp_fork_all.sh

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ start_indices=(75)
66
# Loop through each start index and run the command
77
for start_index in "${start_indices[@]}"; do
88
echo "Running command with start index: $start_index"
9-
CUDA_VISIBLE_DEVICES=1 FSDP_CPU_RAM_EFFICIENT_LOADING=1 torchrun --nnodes 1 --nproc_per_node 1 \
9+
CUDA_VISIBLE_DEVICES=0 FSDP_CPU_RAM_EFFICIENT_LOADING=1 torchrun --nnodes 1 --nproc_per_node 1 \
1010
recipes/quickstart/finetuning/finetuning_wm.py \
1111
--enable_fsdp \
1212
--lr 1e-4 \
@@ -18,9 +18,8 @@ for start_index in "${start_indices[@]}"; do
1818
--use_fast_kernels \
1919
--dataset "custom_dataset" \
2020
--custom_dataset.test_split "test" \
21-
--custom_dataset.file "recipes/quickstart/finetuning/datasets/realfork_dataset_latent_all.py" \
21+
--custom_dataset.file "recipes/quickstart/finetuning/datasets/realfork_dataset_latent.py" \
2222
--custom_dataset.data_path "realfork_data" \
23-
--custom_dataset.answer_type "open-word-fork" \
2423
--custom_dataset.num_images 16 \
2524
--custom_dataset.sample_size 16 \
2625
--custom_dataset.num_history_images 1 \
@@ -31,7 +30,7 @@ for start_index in "${start_indices[@]}"; do
3130
--use_peft \
3231
--peft_method lora \
3332
--use_wm True \
34-
--output_dir /data/finetuned_models/run_02_21_custom_wm_150k_vlm_finetuning_0.2%_imagined_step63_1_history_16sample_size_fork_task_open-word-fork-all_18epoch_print_eval_metrics_3class_aug_failure_by2_shuffle_key_correct_prompt_hist_no_start_from_$start_index \
33+
--output_dir /data/finetuned_models/run_fork_exp \
3534
--target_modules ['down_proj','multi_modal_linear','o_proj','k_proj','q_proj','gate_proj','up_proj','v_proj'] \
3635
--use_wandb \
3736
--custom_dataset.start_index $start_index

0 commit comments

Comments
 (0)