4 files changed, +32 -1 lines changed
File 1 of 4:
- # OMP_NUM_THREADS=14 please Check issue:https://github.com/AutoGPTQ/AutoGPTQ/issues/439
+ # OMP_NUM_THREADS=14, please check issue: https://github.com/AutoGPTQ/AutoGPTQ/issues/439
  OMP_NUM_THREADS=14 \
  CUDA_VISIBLE_DEVICES=0 \
  swift export \
File 2 of 4:
+ CUDA_VISIBLE_DEVICES=0 \
+ swift export \
+     --model Qwen/Qwen2-VL-2B-Instruct \
+     --dataset 'AI-ModelScope/alpaca-gpt4-data-zh#500' \
+               'AI-ModelScope/alpaca-gpt4-data-en#500' \
+     --quant_n_samples 256 \
+     --quant_batch_size -1 \
+     --max_length 2048 \
+     --quant_method awq \
+     --quant_bits 4 \
+     --output_dir Qwen/Qwen2-VL-2B-Instruct-AWQ
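
Once this AWQ export finishes, the directory passed to --output_dir should be loadable like any other Qwen2-VL checkpoint. A minimal sketch of an inference check, assuming a recent transformers (which provides Qwen2VLForConditionalGeneration) plus autoawq are installed and that 'Qwen/Qwen2-VL-2B-Instruct-AWQ' above is a local output path; the prompt text is only illustrative:

    # Minimal sketch: load the AWQ-quantized export and run a text-only generation.
    from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

    model_dir = 'Qwen/Qwen2-VL-2B-Instruct-AWQ'  # --output_dir from the script above
    model = Qwen2VLForConditionalGeneration.from_pretrained(
        model_dir, torch_dtype='auto', device_map='auto')
    processor = AutoProcessor.from_pretrained(model_dir)

    messages = [{'role': 'user', 'content': [{'type': 'text', 'text': 'Who are you?'}]}]
    prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(text=[prompt], return_tensors='pt').to(model.device)

    generated = model.generate(**inputs, max_new_tokens=64)
    print(processor.batch_decode(generated[:, inputs.input_ids.shape[1]:],
                                 skip_special_tokens=True)[0])
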
File 3 of 4:
+ # OMP_NUM_THREADS=14, please check issue: https://github.com/AutoGPTQ/AutoGPTQ/issues/439
+ OMP_NUM_THREADS=14 \
+ CUDA_VISIBLE_DEVICES=0 \
+ MAX_PIXELS=1003520 \
+ VIDEO_MAX_PIXELS=50176 \
+ FPS_MAX_FRAMES=12 \
+ swift export \
+     --model Qwen/Qwen2-VL-2B-Instruct \
+     --dataset 'AI-ModelScope/alpaca-gpt4-data-zh#500' \
+               'AI-ModelScope/alpaca-gpt4-data-en#500' \
+               'modelscope/coco_2014_caption:validation#500' \
+               'swift/VideoChatGPT:Generic#500' \
+     --quant_n_samples 256 \
+     --quant_batch_size 1 \
+     --max_length 2048 \
+     --quant_method gptq \
+     --quant_bits 4 \
+     --output_dir Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4
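
The three environment variables in this script bound the visual input size during multimodal calibration; my reading (an assumption, not stated in the diff) is that MAX_PIXELS and VIDEO_MAX_PIXELS cap the resized area of each image and each video frame, while FPS_MAX_FRAMES caps how many frames are sampled per video. The values line up with Qwen2-VL's 28x28-pixel visual tokens:

    # Sanity check of the pixel budgets above (assumption: each Qwen2-VL visual
    # token corresponds to a 28x28-pixel area after resizing).
    TOKEN_AREA = 28 * 28
    print(1003520 // TOKEN_AREA)  # 1280 -> roughly 1280 visual tokens per image at most
    print(50176 // TOKEN_AREA)    # 64   -> roughly 64 visual tokens per video frame at most
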
File 4 of 4:
@@ -75,6 +75,8 @@ def _init_output_dir(self):
          assert not os.path.exists(self.output_dir), f'args.output_dir: {self.output_dir} already exists.'

      def __post_init__(self):
+         if self.quant_batch_size == -1:
+             self.quant_batch_size = None
          if self.quant_bits and self.quant_method is None:
              raise ValueError('Please specify the quantization method using `--quant_method awq/gptq/bnb`.')
          if self.quant_method and self.quant_bits is None:
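
The two added lines normalize a CLI sentinel: --quant_batch_size -1 becomes quant_batch_size=None before quantization starts, which is what lets the AWQ script above pass -1 while the GPTQ script keeps 1. A small sketch of the intended semantics, under my assumption (not spelled out in the diff) that None means the calibration set is processed as one batch; split_calibration_batches is a hypothetical helper, not a function from this repository:

    # Hypothetical helper illustrating the quant_batch_size sentinel:
    # -1 on the command line -> None after __post_init__ -> one batch of all samples.
    from typing import List, Optional


    def split_calibration_batches(samples: List[str],
                                  quant_batch_size: Optional[int]) -> List[List[str]]:
        if quant_batch_size is None:   # came from --quant_batch_size -1
            return [samples]           # single batch containing every calibration sample
        return [samples[i:i + quant_batch_size]
                for i in range(0, len(samples), quant_batch_size)]


    calib = [f'sample {i}' for i in range(256)]          # --quant_n_samples 256
    print(len(split_calibration_batches(calib, None)))   # 1 batch    (AWQ script, -1)
    print(len(split_calibration_batches(calib, 1)))      # 256 batches (GPTQ script, 1)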