Skip to content

Commit e22fd9e

Browse files
author
Haozhe Qi
committed
small change
1 parent 78bfd64 commit e22fd9e

File tree

2 files changed

+15
-8
lines changed

2 files changed

+15
-8
lines changed

.vscode/launch.json

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -103,16 +103,19 @@
103103
"WANDB_API_KEY": "65aeda82a75f1eed29c8e9250b175fcc73dca0d7",
104104
"CUDA_LAUNCH_BLOCKING": "1",
105105
"HF_HOME": "/media/data/haozhe/VFM/huggingface",
106+
"OPENAI_API_KEY": "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"
106107
},
107108
"args": [
108109
"--num_processes", "4",
109110
"-m", "lmms_eval",
110-
"--model", "llava_vid",
111-
"--model_args", "pretrained=experiments/dev_LLaVA-Video-7B-Qwen2_4f_test_haozhe,conv_template=qwen_1_5,max_frames_num=64,mm_spatial_pool_mode=average",
112-
"--tasks", "videomme",
111+
// "--model", "llava_vid",
112+
"--model", "llava_onevision",
113+
// "--model_args", "pretrained=experiments/dev_LLaVA-Video-7B-Qwen2_4f_test_haozhe,conv_template=qwen_1_5,max_frames_num=64,mm_spatial_pool_mode=average",
114+
"--model_args", "pretrained=lmms-lab/llava-onevision-qwen2-0.5b-ov,conv_template=qwen_1_5,model_name=llava_qwen",
115+
"--tasks", "video_dc499",
113116
"--batch_size", "1",
114117
"--log_samples",
115-
"--log_samples_suffix", "llava_vid_retrained",
118+
"--log_samples_suffix", "llava_onevision",
116119
"--output_path", "./logs/"
117120
],
118121
"console": "integratedTerminal",

run_clariden.sbatch

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -75,10 +75,10 @@ PYTHON_ARGS=" \
7575
--output_dir experiments/dev_7b_4f_llavavideo_test_haozhe \
7676
--num_train_epochs 1 \
7777
--per_device_train_batch_size 1 \
78-
--per_device_eval_batch_size 1 \
78+
--per_device_eval_batch_size 4 \
7979
--gradient_accumulation_steps 2 \
80-
--evaluation_strategy no \
81-
--eval_steps 2000 \
80+
--evaluation_strategy epoch \
81+
--eval_steps 1 \
8282
--save_strategy steps \
8383
--save_steps 2000 \
8484
--learning_rate 1e-5 \
@@ -95,7 +95,11 @@ PYTHON_ARGS=" \
9595
--torch_compile True \
9696
--torch_compile_backend inductor \
9797
--dataloader_drop_last True \
98-
--frames_upbound 4 \
98+
--frames_upbound 64 \
99+
--mm_newline_position grid \
100+
--add_time_instruction True \
101+
--force_sample True \
102+
--mm_spatial_pool_stride 2 \
99103
--root /iopsstor/scratch/cscs/hqi/VFM/onevision/llava_video/EK100 \
100104
--action_predictions /iopsstor/scratch/cscs/hqi/VFM/llava_data/TIM_PREDS/tim_pred_ids_val.json \
101105
--val_metadata /iopsstor/scratch/cscs/hqi/VFM/EK100/epic-kitchens-100-annotations/EPIC_100_validation.csv \

0 commit comments

Comments
 (0)