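"""Task 2 evaluation of Qwen-VL models with vLLM.

For each selected Qwen2-VL / Qwen2.5-VL checkpoint, this script loads the
Task 2 dataset, prompts the model with an image plus a Yes/No question,
constrains decoding to "Yes"/"No" via guided decoding, and reports
per-split accuracy. Dataset loading and shared helpers come from the
project-local `utils` and `load_models` modules.
"""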
import argparse
import os
import json
import time
from vllm import LLM, SamplingParams
from vllm.sampling_params import GuidedDecodingParams
from PIL import Image
from utils import *
from sklearn.metrics import accuracy_score
from collections import defaultdict
from tqdm import tqdm
from load_models import *
from transformers import AutoProcessor
from qwen_vl_utils import process_vision_info

result_map = defaultdict(dict)

model_path_map = {
    "qwen2-vl": {
        "qwen2-vl-2b": "Qwen/Qwen2-VL-2B-Instruct",
        "qwen2-vl-7b": "Qwen/Qwen2-VL-7B-Instruct",
        "qwen2-vl-72b": "Qwen/Qwen2-VL-72B-Instruct-AWQ",
    },
    "qwen2.5-vl": {
        "qwen2.5-vl-72b": "Qwen/Qwen2.5-VL-72B-Instruct-AWQ",
        "qwen2.5-vl-3b": "Qwen/Qwen2.5-VL-3B-Instruct",
        "qwen2.5-vl-7b": "Qwen/Qwen2.5-VL-7B-Instruct",
    },
}
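
# Note: the 72B entries point at AWQ-quantized checkpoints, presumably chosen
# to keep GPU memory requirements manageable at that scale.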


def load_qwen_vl(model_name):
    model_type = "qwen2.5-vl" if "2.5" in model_name else "qwen2-vl"
    model_path = model_path_map[model_type][model_name]
    llm = LLM(
        model=model_path,
        limit_mm_per_prompt={"image": 10, "video": 10},
        gpu_memory_utilization=0.9,
        max_model_len=4096,
    )
    processor = AutoProcessor.from_pretrained(model_path)
    return llm, processor


def get_qwen_prompt(question, image, processor):
    messages = [
        {
            "role": "system",
            "content": "You are a helpful assistant.",
        },
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image,
                    "min_pixels": 224 * 224,
                    "max_pixels": 1280 * 28 * 28,
                },
                {"type": "text", "text": question},
            ],
        },
    ]
    prompt = processor.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    image_inputs, _, video_kwargs = process_vision_info(messages, return_video_kwargs=True)
    mm_data = {}
    if image_inputs is not None:
        mm_data["image"] = image_inputs
    llm_inputs = {
        "prompt": prompt,
        "multi_modal_data": mm_data,
        # FPS will be returned in video_kwargs
        "mm_processor_kwargs": video_kwargs,
    }
    return llm_inputs


def load_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_type',
                        type=str,
                        default="qwen2-vl",
                        choices=["qwen2-vl", "qwen2.5-vl"])
    parser.add_argument('--model_used',
                        type=str,
                        default='all',
                        help="Model name to evaluate, or 'all' to run every model of the chosen type.")
    parser.add_argument('--cuda',
                        type=str,
                        default='2',
                        help='Value for CUDA_VISIBLE_DEVICES.')
    # argparse's type=bool treats any non-empty string (including "False") as
    # True, so parse the flag values explicitly.
    parser.add_argument('--entity_task',
                        type=lambda s: s.lower() in ('true', '1', 'yes'),
                        default=True)
    parser.add_argument('--relation_task',
                        type=lambda s: s.lower() in ('true', '1', 'yes'),
                        default=True)
    return parser


def run_inference_with_vllm_on_single_model(model_type, model_name, args):
    llm, processor = load_qwen_vl(model_name)
    dataset_dict = load_task2_dataset()
    # Constrain decoding to the two valid answers so predictions can be
    # scored directly against the gold labels.
    guided_decode_params = GuidedDecodingParams(
        choice=["Yes", "No"]
    )
    # The temperature should be set to 0 for fair (deterministic) evaluation.
    sampling_params = SamplingParams(
        temperature=0.0,
        max_tokens=4,
        stop_token_ids=[],
        guided_decoding=guided_decode_params
    )
    # Run inference over each dataset split, one instance at a time.
    for dataset_type in dataset_dict:
        print(dataset_type)
        dataset = dataset_dict[dataset_type]
        answers = []
        predicts = []
        for data_instance in tqdm(dataset):
            image = data_instance["image_file"]
            try:
                # Skip instances whose image file is missing or unreadable.
                Image.open(image).close()
            except OSError:
                continue
            question = data_instance["input"]
            input_data = get_qwen_prompt(question, image, processor)
            answer = 1 if data_instance["answer"] == "Yes" else 0
            answers.append(answer)
            outputs = llm.generate([input_data], sampling_params=sampling_params, use_tqdm=False)
            for o in outputs:
                generated_text = o.outputs[0].text
                predict = 1 if generated_text == "Yes" else 0
                predicts.append(predict)
        accuracy = accuracy_score(y_true=answers, y_pred=predicts)
        print(dataset_type, accuracy)
        result_map[model_name][dataset_type] = {
            "result": accuracy,
            "answer": answers,
            "predict": predicts,
        }


def model_dispatch(model_type, model_name, args):
    run_inference_with_vllm_on_single_model(model_type, model_name, args)


def run_inference(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
    model = args.model_type
    if "qwen" not in model:
        raise ValueError(f"Model type {model} is not supported.")
    if args.model_used == "all":
        all_model_names = model_path_map[model].keys()
        for model_name in all_model_names:
            model_dispatch(model, model_name, args)
    else:
        # Evaluate only the single model named by --model_used.
        model_dispatch(model, args.model_used, args)
    for model in result_map.keys():
        print(model, result_map[model])
        with open("{}_task2_result.json".format(model), "w") as f:
            json.dump(result_map, f, ensure_ascii=False)


if __name__ == "__main__":
    parser = load_args()
    args = parser.parse_args()
    run_inference(args=args)
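
# Example invocation (a sketch; adjust the GPU index and model name to your setup):
#   python run_task2_evaluation_qwen_vl.py --model_type qwen2.5-vl \
#       --model_used qwen2.5-vl-7b --cuda 0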