Skip to content

Commit ee73341

Browse files
Merge pull request #3139 from captin411/focal-point-cropping
[Preprocess image] New option to auto crop based on complexity, edges, faces
2 parents 7207e3b + df0c5ea commit ee73341

File tree

4 files changed

+394
-5
lines changed

4 files changed

+394
-5
lines changed
Lines changed: 341 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,341 @@
1+
import cv2
2+
import requests
3+
import os
4+
from collections import defaultdict
5+
from math import log, sqrt
6+
import numpy as np
7+
from PIL import Image, ImageDraw
8+
9+
GREEN = "#0F0"
10+
BLUE = "#00F"
11+
RED = "#F00"
12+
13+
14+
def crop_image(im, settings):
    """Intelligently crop an image to the subject matter.

    Returns a list containing the cropped image and, when
    settings.annotate_image is set, a debug copy with the crop rectangle
    drawn on it.
    """

    def pick_scale():
        # Scale so the constrained side of the source covers the requested crop.
        if is_landscape(im.width, im.height):
            return settings.crop_height / im.height
        if is_portrait(im.width, im.height):
            return settings.crop_width / im.width
        # Square source: match whichever axis the target is constrained on.
        if is_portrait(settings.crop_width, settings.crop_height):
            return settings.crop_height / im.height
        return settings.crop_width / im.width

    factor = pick_scale()
    im = im.resize((int(im.width * factor), int(im.height * factor)))
    im_debug = im.copy()

    focus = focal_point(im_debug, settings)

    half_w = int(settings.crop_width / 2)
    half_h = int(settings.crop_height / 2)

    def anchor(center, half, span, limit):
        # Center the crop on the focus, then pull it back inside the frame.
        start = center - half
        if start < 0:
            return 0
        if start + span > limit:
            return limit - span
        return start

    left = anchor(focus.x, half_w, settings.crop_width, im.width)
    top = anchor(focus.y, half_h, settings.crop_height, im.height)
    crop = [left, top, left + settings.crop_width, top + settings.crop_height]

    results = [im.crop(tuple(crop))]

    if settings.annotate_image:
        d = ImageDraw.Draw(im_debug)
        # Shrink the rectangle by one pixel so the outline stays inside the crop.
        rect = [crop[0], crop[1], crop[2] - 1, crop[3] - 1]
        d.rectangle(rect, outline=GREEN)
        results.append(im_debug)
        if settings.destop_view_image:
            im_debug.show()

    return results
72+
73+
def focal_point(im, settings):
    """Combine corner, entropy, and face detections into one weighted point.

    Each enabled detector contributes the centroid of its points, weighted by
    its configured weight normalised over the detectors that found anything.
    When settings.annotate_image is set, the detections are drawn onto *im*.
    """
    corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
    entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
    face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

    # (points, configured weight, annotation colour, label) in a fixed order
    # so both the weighted average and the debug drawing stay deterministic.
    detectors = [
        (corner_points, settings.corner_points_weight, BLUE, "Edge"),
        (entropy_points, settings.entropy_points_weight, "#ff0", "Entropy"),
        (face_points, settings.face_points_weight, RED, "Face"),
    ]

    # Normalise weights over the detectors that actually produced points.
    weight_pref_total = sum(w for pts, w, _, _ in detectors if pts)

    pois = []
    centroids = []
    for pts, w, _, _ in detectors:
        c = None
        if pts:
            c = centroid(pts)
            c.weight = w / weight_pref_total
            pois.append(c)
        centroids.append(c)

    average_point = poi_average(pois, settings)

    if settings.annotate_image:
        d = ImageDraw.Draw(im)
        max_size = min(im.width, im.height) * 0.07
        for (pts, _, color, label), c in zip(detectors, centroids):
            if c is None:
                continue
            box = c.bounding(max_size * c.weight)
            d.text((box[0], box[1] - 15), "%s: %.02f" % (label, c.weight), fill=color)
            d.ellipse(box, outline=color)
            if len(pts) > 1:
                for p in pts:
                    d.rectangle(p.bounding(4), outline=color)

        d.ellipse(average_point.bounding(max_size), outline=GREEN)

    return average_point
139+
140+
141+
def image_face_points(im, settings):
    """Detect faces in *im* and return them as PointOfInterest entries.

    Uses the YuNet DNN detector when settings.dnn_model_path is set,
    otherwise falls back through a sequence of OpenCV Haar cascades and
    returns the hits from the first cascade that matches anything.
    Returns an empty list when nothing is found.
    """
    if settings.dnn_model_path is not None:
        detector = cv2.FaceDetectorYN.create(
            settings.dnn_model_path,
            "",
            (im.width, im.height),
            0.9,  # score threshold
            0.3,  # nms threshold
            5000  # keep top k before nms
        )
        faces = detector.detect(np.array(im))
        results = []
        if faces[1] is not None:
            for face in faces[1]:
                x = face[0]
                y = face[1]
                w = face[2]
                h = face[3]
                results.append(
                    PointOfInterest(
                        int(x + (w * 0.5)),   # face focus left/right is center
                        int(y + (h * 0.33)),  # face focus up/down is close to the top of the head
                        size=w,
                        weight=1 / len(faces[1])
                    )
                )
        return results
    else:
        np_im = np.array(im)
        # NOTE(review): PIL arrays are RGB, so COLOR_BGR2GRAY swaps the R/B
        # luma weights — harmless for detection but worth confirming.
        gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

        # (cascade file, minimum face size as a fraction of the short side)
        tries = [
            [f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05]
        ]
        for t in tries:
            classifier = cv2.CascadeClassifier(t[0])
            minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                    minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
            except Exception:
                # Was a bare `except:` — narrowed so KeyboardInterrupt and
                # SystemExit are no longer swallowed; a failing cascade is
                # still skipped in favour of the next one.
                continue

            if len(faces) > 0:
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0] - r[2]), weight=1 / len(rects)) for r in rects]
    return []
195+
196+
197+
def image_corner_points(im, settings):
    """Find strong corner features (Shi-Tomasi) and return them as points.

    The bottom 10% of the image is painted flat grey first so watermarks
    there do not attract corners. Returns [] when no corners are found.
    """
    grayscale = im.convert("L")

    # naive attempt at preventing focal points from collecting at watermarks near the bottom
    gd = ImageDraw.Draw(grayscale)
    gd.rectangle([0, im.height * .9, im.width, im.height], fill="#999")

    np_im = np.array(grayscale)

    corners = cv2.goodFeaturesToTrack(
        np_im,
        maxCorners=100,
        qualityLevel=0.04,
        minDistance=min(grayscale.width, grayscale.height) * 0.06,
        useHarrisDetector=False,
    )

    if corners is None:
        return []

    # All corners share equal weight.
    shared_weight = 1 / len(corners)
    return [
        PointOfInterest(x, y, size=4, weight=shared_weight)
        for x, y in (c.ravel() for c in corners)
    ]
223+
224+
225+
def image_entropy_points(im, settings):
    """Slide a crop-sized window along the long axis and return the spot
    with the highest entropy as a single PointOfInterest.

    Square images have no long axis to slide along, so they yield [].
    """
    if im.height < im.width:          # landscape: slide along x
        slide_idx = (0, 2)
        limit = im.size[0]
    elif im.height > im.width:        # portrait: slide along y
        slide_idx = (1, 3)
        limit = im.size[1]
    else:
        return []

    best_entropy = 0
    window = [0, 0, settings.crop_width, settings.crop_height]
    best_window = window
    while window[slide_idx[1]] < limit:
        e = image_entropy(im.crop(tuple(window)))

        if e > best_entropy:
            best_entropy = e
            best_window = list(window)

        # Advance the window 4 pixels at a time.
        window[slide_idx[0]] += 4
        window[slide_idx[1]] += 4

    x_mid = int(best_window[0] + settings.crop_width / 2)
    y_mid = int(best_window[1] + settings.crop_height / 2)

    return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
255+
256+
257+
def image_entropy(im):
    """Return the Shannon entropy of *im* after conversion to 1-bit.

    Higher values mean a busier / more detailed crop. Only used to rank
    candidate crops against each other.
    """
    band = np.asarray(im.convert("1"), dtype=np.uint8)
    hist, _ = np.histogram(band, bins=range(0, 256))
    hist = hist[hist > 0]
    probs = hist / hist.sum()
    # Shannon entropy is -sum(p * log2(p)); the previous version summed the
    # raw log terms without weighting by p, which is not an entropy at all.
    return -(probs * np.log2(probs)).sum()
264+
265+
def centroid(pois):
    """Return the unweighted geometric center of a non-empty list of points."""
    count = len(pois)
    return PointOfInterest(
        sum(p.x for p in pois) / count,
        sum(p.y for p in pois) / count,
    )
269+
270+
271+
def poi_average(pois, settings):
    """Return the weight-averaged position of *pois* as a new point.

    *settings* is accepted for interface symmetry with the other helpers
    but is not used here.
    """
    total_weight = 0.0
    sum_x = 0.0
    sum_y = 0.0
    for p in pois:
        total_weight += p.weight
        sum_x += p.x * p.weight
        sum_y += p.y * p.weight

    return PointOfInterest(round(sum_x / total_weight), round(sum_y / total_weight))
283+
284+
285+
def is_landscape(w, h):
    """True when the width strictly exceeds the height."""
    return h < w
287+
288+
289+
def is_portrait(w, h):
    """True when the height strictly exceeds the width."""
    return w < h
291+
292+
293+
def is_square(w, h):
    """True when width and height are equal."""
    return (w - h) == 0
295+
296+
297+
def download_and_cache_models(dirname):
    """Ensure the YuNet face-detection ONNX model is cached in *dirname*.

    Downloads the model on first use. Returns the path of the cached file,
    or None if it could not be obtained. Raises requests.HTTPError when the
    download server answers with an error status.
    """
    download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
    model_file_name = 'face_detection_yunet.onnx'

    # exist_ok avoids the check-then-create race of the previous version.
    os.makedirs(dirname, exist_ok=True)

    cache_file = os.path.join(dirname, model_file_name)
    if not os.path.exists(cache_file):
        print(f"downloading face detection model from '{download_url}' to '{cache_file}'")
        # A timeout keeps a dead mirror from hanging startup forever, and
        # raise_for_status keeps an HTTP error page from being silently
        # cached as the model file.
        response = requests.get(download_url, timeout=60)
        response.raise_for_status()
        with open(cache_file, "wb") as f:
            f.write(response.content)

    if os.path.exists(cache_file):
        return cache_file
    return None
314+
315+
316+
class PointOfInterest:
    """A weighted point with a nominal size, marking an interesting image spot."""

    def __init__(self, x, y, weight=1.0, size=10):
        self.x = x            # horizontal position (pixels)
        self.y = y            # vertical position (pixels)
        self.weight = weight  # relative importance among sibling points
        self.size = size      # nominal extent, used when drawing annotations

    def bounding(self, size):
        """Return [x1, y1, x2, y2] of a box of roughly *size* centered here."""
        half = size // 2
        return [self.x - half, self.y - half, self.x + half, self.y + half]
330+
331+
332+
class Settings:
    """Configuration for the focal-point cropper."""

    def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False, dnn_model_path=None):
        self.crop_width = crop_width                        # target crop width (px)
        self.crop_height = crop_height                      # target crop height (px)
        self.corner_points_weight = corner_points_weight    # influence of corner detection
        self.entropy_points_weight = entropy_points_weight  # influence of the entropy scan
        self.face_points_weight = face_points_weight        # influence of face detection
        self.annotate_image = annotate_image                # also return a debug image
        # NOTE(review): attribute keeps the original "destop" typo because
        # callers reference it by this name; renaming would break them.
        self.destop_view_image = False                      # pop up the debug image in a viewer
        self.dnn_model_path = dnn_model_path                # optional YuNet ONNX model path

0 commit comments

Comments
 (0)