1 file changed: +7 −11 lines changed
@@ -1,26 +1,22 @@
-MODEL=EVA02-CLIP-L-14-336
+MODEL=EVA02-CLIP-L-14
 PRETRAINED=eva_clip
-python -m torch.distributed.launch --nproc_per_node=8 \
+python -m torch.distributed.launch --nproc_per_node=2 \
   --use_env training/main.py \
   --enable-deepspeed \
   --grad-checkpointing \
   --name="T_vitl336_Rcc12mR_Rcc3m_4ep" \
   --save-frequency 1 \
   --zeroshot-frequency 1 \
-  --report-to="tensorboard,wandb" \
+  --report-to="" \
   --wandb-project-name="LLM2CLIP" \
   --wandb-notes="EVA02-CLIP-L-14-336" \
-  --train-data-list "data/cc3m/cc3m-train-{00..0287}.tar;data/cc12m/cc12m-train-{00..1001}.tar" \
-  --train-num-samples-list 2873538 10000225 \
-  --eval-data-file=training/eval_datasets.yaml \
-  --imagenet-val=data/eval_data/imagenet/val.zip \
-  --imagenet-val-text=data/eval_data/imagenet/val_map.txt \
-  --imagenet-classname-feautres data/eval_data/imagenet/im_classname_llm_features.dpt \
+  --train-data-list "/home/aiscuser/LLM2CLIP/llm2clip/data/cc3m/{00..00004}.tar" \
+  --train-num-samples-list 2873538 \
   --pretrained=${PRETRAINED} \
   --dataset-resampled \
   --precision "fp16" \
   --warmup 0 \
-  --batch-size=512 \
+  --batch-size=16 \
   --eval-batch-size=1024 \
   --log-every-n-steps 50 \
   --epochs=20 \
@@ -35,7 +31,7 @@ python -m torch.distributed.launch --nproc_per_node=8 \
   --visual-ld=0.85 \
   --grad-clip-norm=5.0 \
   --smoothing=0. \
-  --workers=8 \
+  --workers=1 \
   --model=${MODEL} \
   --seed 4096 \
   --gather-with-grad \
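Side note: recent PyTorch releases deprecate python -m torch.distributed.launch in favor of torchrun, which passes the local rank through environment variables by default, so --use_env is no longer needed. A minimal sketch of the reduced 2-GPU launch under that assumption; the flag values shown are taken from the updated script above, and the rest of the training flags stay exactly as in the diff:

# Equivalent launch with torchrun instead of the deprecated torch.distributed.launch.
# torchrun exports LOCAL_RANK itself, so the --use_env flag is dropped.
torchrun --nproc_per_node=2 training/main.py \
  --enable-deepspeed \
  --grad-checkpointing \
  --batch-size=16 \
  --workers=1 \
  --model=${MODEL} \
  --pretrained=${PRETRAINED}
  # ...plus the remaining training/main.py flags exactly as in the diff above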