# Outlines_for_transformers_vision_batch.py
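"""
Batched, schema-constrained image description with Outlines and Qwen2.5-VL.

Resizes an input image, builds two identical vision-chat conversations, and
uses outlines.generate.json to force the model's output to match the
ImageDescription Pydantic schema. Written against the pre-1.0 Outlines API
(outlines.models.transformers_vision / outlines.generate.json).
"""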
import json
from typing import List, Optional

import outlines
import torch
from outlines import samplers
from PIL import Image
from pydantic import BaseModel, Field
from rich import print
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration


class ImageDescription(BaseModel):
    """
    Pydantic class to represent an image description.
    """

    subject: str = Field(description="The main subject of the image.")
    action: Optional[str] = Field(None, description="The action being performed in the image, if any.")
    objects: Optional[List[str]] = Field(None, description="A list of objects present in the image.")
    scene: Optional[str] = Field(None, description="The general scene or setting of the image.")
    setting: Optional[str] = Field(None, description="Specific details of the setting or environment.")
    colors: Optional[List[str]] = Field(None, description="Dominant colors present in the image.")
    style: Optional[str] = Field(None, description="The artistic style or photographic technique used.")
    mood: Optional[str] = Field(None, description="The overall mood or atmosphere of the image.")
    composition: Optional[str] = Field(None, description="The composition of the image (e.g., close-up, wide shot).")
    lighting: Optional[str] = Field(None, description="The lighting conditions in the image.")
    details: Optional[List[str]] = Field(None, description="Specific details or notable features in the image.")
    additional_notes: Optional[str] = Field(None, description="Any additional relevant information about the image.")
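

# The JSON schema generated from this class is embedded in the prompt below
# and, via outlines.generate.json, used to constrain decoding so the model can
# only produce output that parses into an ImageDescription.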


def load_and_resize_image(image_path, max_size=1024):
    """
    Load and resize an image while maintaining aspect ratio.

    Args:
        image_path: Path to the image file
        max_size: Maximum dimension (width or height) of the output image

    Returns:
        PIL Image: Resized image
    """
    image = Image.open(image_path)

    # Get current dimensions
    width, height = image.size

    # Calculate scaling factor
    scale = min(max_size / width, max_size / height)

    # Only resize if image is larger than max_size
    if scale < 1:
        new_width = int(width * scale)
        new_height = int(height * scale)
        image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)

    return image


def save_resized_image(resized_image, output_path, format="PNG", quality=95):
    """
    Save a PIL Image object to a specified path.

    Args:
        resized_image: The PIL Image object to save.
        output_path: The path where the image should be saved.
        format: The image format to save as (e.g., "PNG", "JPEG"). Defaults to "PNG".
        quality: The quality for JPEG images (0-95, higher is better). Ignored for other formats.
    """
    try:
        resized_image.save(output_path, format=format, quality=quality)
        print(f"Resized image saved to: {output_path}")
    except Exception as e:
        print(f"Error saving image: {e}")
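

# Resize the input image to at most 1024 px on its longest side and save a
# JPEG copy; note the resized PIL image itself (not the saved file) is what
# gets passed to the model below.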
resized_img = load_and_resize_image(image_path="Inputs/for_enhance.jpg")
resized = "resized.jpg"
# Pillow advises against JPEG quality values above 95.
save_resized_image(resized_image=resized_img, quality=95, format="JPEG", output_path=resized)
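

# Load Qwen2.5-VL through Outlines' transformers_vision wrapper; device_map="auto"
# with bfloat16 assumes a GPU with enough memory for the 3B model.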
model_name = "Qwen/Qwen2.5-VL-3B-Instruct"
model_class = Qwen2_5_VLForConditionalGeneration

model = outlines.models.transformers_vision(
    model_name,
    model_class=model_class,
    model_kwargs={
        "device_map": "auto",
        "torch_dtype": torch.bfloat16,
        # do_sample is a generation-time flag, not a from_pretrained kwarg, so
        # it is dropped here; the decoding strategy is set by the sampler
        # passed to outlines.generate.json below.
    },
)

# Batched messages: one conversation per prompt/image pair. Both entries reuse
# the same resized image, which keeps the example minimal while still
# exercising the batch code path.
messages = [
    [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": resized_img,
                },
                {
                    "type": "text",
                    "text": f"""You are an expert at generating detailed image descriptions.
Please provide a comprehensive description of the image. Be as detailed as possible.
Return the information in the following JSON schema:
{ImageDescription.model_json_schema()}
""",
                },
            ],
        }
    ],
    [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": resized_img,
                },
                {
                    "type": "text",
                    "text": f"""You are an expert at generating detailed image descriptions.
Please provide a comprehensive description of the image. Be as detailed as possible.
Return the information in the following JSON schema:
{ImageDescription.model_json_schema()}
""",
                },
            ],
        }
    ],
]

# Convert each conversation into a final prompt string with the model's chat template.
processor = AutoProcessor.from_pretrained(model_name)
prompts = [processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True) for msg in messages]
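# add_generation_prompt=True appends the assistant-turn header so the model
# answers as the assistant rather than continuing the user message.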
# print(prompts)

image_description_generator = outlines.generate.json(
    model,
    ImageDescription,
    sampler=samplers.GreedySampler(),
)
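# GreedySampler makes decoding deterministic; Outlines additionally masks the
# logits at each step so only tokens that keep the output valid against the
# ImageDescription schema can be chosen.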

# Generate one description per prompt; images are supplied as a nested list,
# one inner list of images per prompt, in the same order as `prompts`.
results = image_description_generator(prompts, [[resized_img], [resized_img]])
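# Each result is an ImageDescription instance, so model_dump() returns a plain
# dict that json.dumps can serialize.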

# Print the results
for result in results:
    print(json.dumps(result.model_dump(), indent=4))