@@ -6,7 +6,7 @@ start_indices=(75)
 # Loop through each start index and run the command
 for start_index in "${start_indices[@]}"; do
     echo "Running command with start index: $start_index"
-    CUDA_VISIBLE_DEVICES=1 FSDP_CPU_RAM_EFFICIENT_LOADING=1 torchrun --nnodes 1 --nproc_per_node 1 \
+    CUDA_VISIBLE_DEVICES=0 FSDP_CPU_RAM_EFFICIENT_LOADING=1 torchrun --nnodes 1 --nproc_per_node 1 \
     recipes/quickstart/finetuning/finetuning_wm.py \
     --enable_fsdp \
     --lr 1e-4 \
@@ -18,9 +18,8 @@ for start_index in "${start_indices[@]}"; do
     --use_fast_kernels \
     --dataset "custom_dataset" \
     --custom_dataset.test_split "test" \
-    --custom_dataset.file "recipes/quickstart/finetuning/datasets/realfork_dataset_latent_all.py" \
+    --custom_dataset.file "recipes/quickstart/finetuning/datasets/realfork_dataset_latent.py" \
     --custom_dataset.data_path "realfork_data" \
-    --custom_dataset.answer_type "open-word-fork" \
     --custom_dataset.num_images 16 \
     --custom_dataset.sample_size 16 \
     --custom_dataset.num_history_images 1 \
@@ -31,7 +30,7 @@ for start_index in "${start_indices[@]}"; do
     --use_peft \
     --peft_method lora \
     --use_wm True \
-    --output_dir /data/finetuned_models/run_02_21_custom_wm_150k_vlm_finetuning_0.2%_imagined_step63_1_history_16sample_size_fork_task_open-word-fork-all_18epoch_print_eval_metrics_3class_aug_failure_by2_shuffle_key_correct_prompt_hist_no_start_from_$start_index \
+    --output_dir /data/finetuned_models/run_fork_exp \
     --target_modules ['down_proj','multi_modal_linear','o_proj','k_proj','q_proj','gate_proj','up_proj','v_proj'] \
     --use_wandb \
     --custom_dataset.start_index $start_index
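
For context, the loop above is written to sweep multiple dataset offsets; with start_indices=(75) it launches a single run. A minimal sketch of a multi-offset sweep, assuming the extra index values and the per-run output-directory suffix (both hypothetical, not part of this commit):

    # Hypothetical sweep: each entry launches one fine-tuning run whose
    # dataset slice begins at that offset (example values only).
    start_indices=(75 150 225)

    for start_index in "${start_indices[@]}"; do
        echo "Running command with start index: $start_index"
        # ... same torchrun invocation as above, appending the offset so
        # successive runs do not overwrite each other (hypothetical path):
        #   --output_dir /data/finetuned_models/run_fork_exp_$start_index
    done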