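"""OpenAI-compatible FastAPI server for the Qwen2.5-VL vision-language model.

Exposes /v1/models, /v1/chat/completions (text and image inputs), and /health.
"""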
import argparse
import base64
import io
import json
import logging
import os
import shutil
import time
from contextlib import asynccontextmanager
from datetime import datetime
from typing import Optional, List, Union, Dict, Any

import GPUtil
import psutil
import torch
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from PIL import Image
from pydantic import BaseModel, field_validator
from qwen_vl_utils import process_vision_info
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
MODEL_DIR_BASE = "./app/models/"

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Global variables
model = None
current_loaded_model = None
processor = None
device = "cuda" if torch.cuda.is_available() else "cpu"
parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, default=9192, help="Which port to listen on for HTTP API requests")
parser.add_argument('--model', type=str, default='Qwen2.5-VL-7B-Instruct', help="Which Qwen 2.5 VL model to load")
parser.add_argument(
    '--resume',
    action='store_true',
    help="Attempt to resume partial downloads if possible"
)
parser.add_argument(
    '--quant',
    type=str,
    choices=['int8', 'int4'],
    default=None,
    help='Quantization level for model loading (requires bitsandbytes and CUDA)'
)
args = parser.parse_args()
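# Example invocation (illustrative; all flags are optional and default as above):
#   python app.py --model Qwen2.5-VL-7B-Instruct --quant int4 --port 9192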
class ImageURL(BaseModel):
    url: str
class MessageContent(BaseModel):
    type: str
    text: Optional[str] = None
    image_url: Optional[Dict[str, str]] = None

    @field_validator('type')
    @classmethod
    def validate_type(cls, v: str) -> str:
        if v not in ['text', 'image_url']:
            raise ValueError(f"Invalid content type: {v}")
        return v
class ChatMessage(BaseModel):
    role: str
    content: Union[str, List[MessageContent]]

    @field_validator('role')
    @classmethod
    def validate_role(cls, v: str) -> str:
        if v not in ['system', 'user', 'assistant']:
            raise ValueError(f"Invalid role: {v}")
        return v

    @field_validator('content')
    @classmethod
    def validate_content(cls, v: Union[str, List[Any]]) -> Union[str, List[MessageContent]]:
        if isinstance(v, str):
            return v
        if isinstance(v, list):
            return [MessageContent(**item) if isinstance(item, dict) else item for item in v]
        raise ValueError("Content must be either a string or a list of content items")
class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 0.95
    max_tokens: Optional[int] = 2048
    stream: Optional[bool] = False
    response_format: Optional[Dict[str, str]] = None
class ChatCompletionResponse(BaseModel):
    id: str
    object: str
    created: int
    model: str
    choices: List[Dict[str, Any]]
    usage: Dict[str, int]
class ModelCard(BaseModel):
    id: str
    created: int
    owned_by: str
    permission: List[Dict[str, Any]] = []
    root: Optional[str] = None
    parent: Optional[str] = None
    capabilities: Optional[Dict[str, bool]] = None
    context_window: Optional[int] = None
    max_tokens: Optional[int] = None
class ModelList(BaseModel):
    object: str = "list"
    data: List[ModelCard]
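# Example message payload accepted by the models above (illustrative, in the
# OpenAI chat format that ChatMessage/MessageContent validate):
#   {"role": "user", "content": [
#       {"type": "text", "text": "Describe this image."},
#       {"type": "image_url", "image_url": {"url": "data:image/png;base64,<BASE64>"}}
#   ]}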
def process_base64_image(base64_string: str) -> Image.Image:
    """Process base64 image data and return PIL Image"""
    try:
        # Remove data URL prefix if present
        if 'base64,' in base64_string:
            base64_string = base64_string.split('base64,')[1]
        image_data = base64.b64decode(base64_string)
        image = Image.open(io.BytesIO(image_data))
        # Convert to RGB if necessary
        if image.mode not in ('RGB', 'L'):
            image = image.convert('RGB')
        return image
    except Exception as e:
        logger.error(f"Error processing base64 image: {str(e)}")
        raise ValueError(f"Invalid base64 image data: {str(e)}")
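# Accepts either a full data URL ("data:image/png;base64,iVBORw0KGgo...") or
# the bare base64 payload without the prefix (illustrative examples).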
def log_system_info():
    """Log system resource information"""
    try:
        cpu_percent = psutil.cpu_percent(interval=1)
        memory = psutil.virtual_memory()
        gpu_info = []
        if torch.cuda.is_available():
            for gpu in GPUtil.getGPUs():
                gpu_info.append({
                    'id': gpu.id,
                    'name': gpu.name,
                    'load': f"{gpu.load*100}%",
                    'memory_used': f"{gpu.memoryUsed}MB/{gpu.memoryTotal}MB",
                    'temperature': f"{gpu.temperature}°C"
                })
        logger.info(f"System Info - CPU: {cpu_percent}%, RAM: {memory.percent}%, "
                    f"Available RAM: {memory.available/1024/1024/1024:.1f}GB")
        if gpu_info:
            logger.info(f"GPU Info: {gpu_info}")
    except Exception as e:
        logger.warning(f"Failed to log system info: {str(e)}")
def download_model(model_name: str):
    """Download and save model files under a subdirectory named after the given model name"""
    target_dir = os.path.join(MODEL_DIR_BASE, model_name)
    try:
        # Create target directory structure
        os.makedirs(target_dir, exist_ok=True)
        logger.info(f"Downloading {model_name} processor configuration...")
        processor = AutoProcessor.from_pretrained(
            f"Qwen/{model_name}",
        )
        processor.save_pretrained(target_dir)
        logger.info(f"Downloading {model_name} model files...")
        with torch.inference_mode():
            model_temp = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                f"Qwen/{model_name}",
                torch_dtype=torch.float16,
                device_map="auto",
                offload_folder="offload",
                offload_state_dict=True,
                low_cpu_mem_usage=True
            )
            logger.info(f"Saving model to {target_dir}...")
            model_temp.save_pretrained(
                target_dir,
                safe_serialization=True,
                max_shard_size="2GB"
            )
        # Cleanup temporary files
        if os.path.exists("offload"):
            shutil.rmtree("offload")
    except Exception as e:
        logger.error(f"Model download failed: {str(e)}", exc_info=True)
        raise RuntimeError(f"Failed to save model '{model_name}'") from e
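# After a successful download the files live under ./app/models/<model_name>/
# (MODEL_DIR_BASE plus the model name), which initialize_model() below loads
# with local_files_only=True.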
def initialize_model(model_name: str):
    """Initialize the model and processor"""
    global model, processor, current_loaded_model
    if model is None or processor is None:
        try:
            start_time = time.time()
            logger.info("Starting model initialization...")
            log_system_info()
            # Construct full path to model directory
            model_dir_path = os.path.join(MODEL_DIR_BASE, model_name)
            # Check and download model files if needed
            if not os.path.exists(model_dir_path):
                logger.warning(f"Model '{model_name}' not found. Downloading now...")
                try:
                    download_model(model_name)
                except Exception as e:
                    raise RuntimeError("Download failed: " + str(e))
            elif args.resume:
                logger.warning(f"Resuming download of model '{model_name}'.")
                try:
                    download_model(model_name)
                except Exception as e:
                    raise RuntimeError("Download failed: " + str(e))
            else:
                logger.info(f"Using existing files from {model_dir_path}")
            # Check for flash attention availability first
            use_flash = False
            try:
                import flash_attn  # noqa: F401
                logger.info("Flash attention is available, using it...")
                use_flash = True
            except ImportError:
                logger.warning("Flash attention not available. Using default implementation.")
            if args.quant:  # Quantization requested
                if not torch.cuda.is_available():
                    raise RuntimeError("Quantization requires CUDA support")
                try:
                    import bitsandbytes as bnb  # noqa: F401
                except ImportError:
                    logger.error(
                        "bitsandbytes is required for quantization. Install with: pip install bitsandbytes -U"
                    )
                    raise
                model_kwargs = {
                    'device_map': 'auto',
                    'torch_dtype': torch.float16,
                    'local_files_only': True
                }
                # Add flash attention if available
                if use_flash:
                    model_kwargs['attn_implementation'] = "flash_attention_2"
                if args.quant == 'int8':
                    model_kwargs["load_in_8bit"] = True
                elif args.quant == 'int4':
                    model_kwargs.update({
                        "load_in_4bit": True,
                        "bnb_4bit_use_double_quant": True,
                        "bnb_4bit_compute_dtype": torch.float16
                    })
                # Load quantized model
                logger.info(f"Loading {args.quant}-bit quantized model...")
                model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                    model_dir_path,
                    **model_kwargs
                ).eval()
            else:
                # Default loading path without quantization; reuse the flash
                # attention check from above instead of re-importing flash_attn
                if use_flash:
                    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                        model_dir_path,
                        torch_dtype=torch.bfloat16,
                        attn_implementation="flash_attention_2",
                        device_map="auto",
                        local_files_only=True
                    ).eval()
                else:
                    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                        model_dir_path,
                        torch_dtype=torch.bfloat16,
                        device_map="auto",
                        local_files_only=True
                    ).eval()
            processor = AutoProcessor.from_pretrained(model_dir_path, local_files_only=True)
            current_loaded_model = model_name
            end_time = time.time()
            logger.info(f"Model initialized in {end_time - start_time:.2f} seconds")
            log_system_info()
        except Exception as e:
            logger.error(f"Error initializing model: {str(e)}", exc_info=True)
            raise
@asynccontextmanager
async def lifespan(app: FastAPI):
    logger.info("Starting application initialization...")
    try:
        initialize_model(args.model)
        logger.info("Application startup complete!")
        yield
    finally:
        logger.info("Shutting down application...")
        global model, processor
        if model is not None:
            try:
                del model
                torch.cuda.empty_cache()
                logger.info("Model unloaded and CUDA cache cleared")
            except Exception as e:
                logger.error(f"Error during cleanup: {str(e)}")
        model = None
        processor = None
        logger.info("Shutdown complete")
app = FastAPI(
    title="Qwen2.5-VL API",
    description="OpenAI-compatible API for Qwen2.5-VL vision-language model",
    version="1.0.0",
    lifespan=lifespan
)

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/v1/models", response_model=ModelList)
async def list_models():
    """List available models"""
    return ModelList(
        data=[
            ModelCard(
                id=current_loaded_model,
                created=1709251200,
                owned_by="Qwen",
                permission=[{
                    "id": current_loaded_model,
                    "created": 1709251200,
                    "allow_create_engine": False,
                    "allow_sampling": True,
                    "allow_logprobs": True,
                    "allow_search_indices": False,
                    "allow_view": True,
                    "allow_fine_tuning": False,
                    "organization": "*",
                    "group": None,
                    "is_blocking": False
                }],
                capabilities={
                    "vision": True,
                    "chat": True,
                    "embeddings": False,
                    "text_completion": True
                },
                context_window=131072,
                max_tokens=8192
            )
        ]
    )
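# Example query (assumes the server is listening on the default port):
#   curl http://localhost:9192/v1/models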
@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(request: ChatCompletionRequest):
    """Handle chat completion requests with vision support"""
    if request.model != current_loaded_model:
        raise HTTPException(
            status_code=400,
            detail=f"Requested model '{request.model}' not loaded. Current model is {current_loaded_model}"
        )
    try:
        request_start_time = time.time()
        logger.info(f"Received chat completion request for model: {request.model}")
        logger.info(f"Request content: {request.model_dump_json()}")
        messages = []
        for msg in request.messages:
            if isinstance(msg.content, str):
                messages.append({"role": msg.role, "content": msg.content})
            else:
                processed_content = []
                for content_item in msg.content:
                    if content_item.type == "text":
                        processed_content.append({
                            "type": "text",
                            "text": content_item.text
                        })
                    elif content_item.type == "image_url":
                        # Guard against a missing image_url payload
                        if content_item.image_url and "url" in content_item.image_url:
                            if content_item.image_url["url"].startswith("data:image"):
                                processed_content.append({
                                    "type": "image",
                                    "image": process_base64_image(content_item.image_url["url"])
                                })
                            else:
                                # Pass remote URLs / file paths through unchanged;
                                # qwen_vl_utils resolves them in process_vision_info
                                processed_content.append({
                                    "type": "image",
                                    "image": content_item.image_url["url"]
                                })
                messages.append({"role": msg.role, "content": processed_content})
        text = processor.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        image_inputs, video_inputs = process_vision_info(messages)
        inputs = processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt"
        ).to(device)
        generated_ids = model.generate(
            **inputs,
            max_new_tokens=request.max_tokens,
            # temperature/top_p only take effect when sampling is enabled
            do_sample=True,
            temperature=request.temperature,
            top_p=request.top_p
        )
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        response = processor.batch_decode(
            generated_ids_trimmed,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=False
        )[0]
        if request.response_format and request.response_format.get("type") == "json_object":
            try:
                # Strip a Markdown code fence (``` or ```json) around the model output
                if response.startswith('```'):
                    response = '\n'.join(response.split('\n')[1:-1])
                if response.startswith('json'):
                    response = response[4:].lstrip()
                content = json.loads(response)
                response = json.dumps(content)
            except json.JSONDecodeError as e:
                logger.error(f"JSON parsing error: {str(e)}")
                raise HTTPException(status_code=400, detail=f"Invalid JSON response: {str(e)}")
        total_time = time.time() - request_start_time
        logger.info(f"Request completed in {total_time:.2f} seconds")
        return ChatCompletionResponse(
            id=f"chatcmpl-{datetime.now().strftime('%Y%m%d%H%M%S')}",
            object="chat.completion",
            created=int(datetime.now().timestamp()),
            model=request.model,
            choices=[{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": response
                },
                "finish_reason": "stop"
            }],
            usage={
                "prompt_tokens": len(inputs.input_ids[0]),
                "completion_tokens": len(generated_ids_trimmed[0]),
                "total_tokens": len(inputs.input_ids[0]) + len(generated_ids_trimmed[0])
            }
        )
    except Exception as e:
        logger.error(f"Request error: {str(e)}", exc_info=True)
        if isinstance(e, HTTPException):
            raise
        raise HTTPException(status_code=500, detail=str(e))
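# Example request (illustrative; assumes a local server on the default port
# with the default model loaded, and <BASE64> replaced by real image data):
#   curl http://localhost:9192/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "Qwen2.5-VL-7B-Instruct",
#          "messages": [{"role": "user", "content": [
#            {"type": "text", "text": "Describe this image."},
#            {"type": "image_url", "image_url": {"url": "data:image/png;base64,<BASE64>"}}]}]}'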
@app.get("/health")
async def health_check():
    """Health check endpoint"""
    log_system_info()
    return {
        "status": "healthy",
        "model_loaded": model is not None and processor is not None,
        "device": str(device),
        "cuda_available": torch.cuda.is_available(),
        "quantization": args.quant if args.quant else "none",
        "cuda_device_count": torch.cuda.device_count() if torch.cuda.is_available() else 0,
        "timestamp": datetime.now().isoformat()
    }
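# Quick liveness probe (default port assumed):
#   curl http://localhost:9192/health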
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=args.port)