import sys

import cv2
import numpy as np

import ailia

sys.path.append('../ailia-models/util')
# wildcard import; helpers referenced below (e.g. get_largest_face,
# get_center_face, calc_mean_std) are expected to come from these utility paths
from nms_utils import *
sys.path.append('../../face_detection/retinaface')
import retinaface_utils as rut
from retinaface_utils import PriorBox

# RetinaFace detection parameters
CONFIDENCE_THRES = 0.8  # discard detections below this score
NMS_THRES = 0.4         # IoU threshold for non-maximum suppression
TOP_K = 5000            # keep at most this many detections before NMS
KEEP_TOP_K = 750        # keep at most this many detections after NMS

def postprocessing(preds_ailia, input_data, cfg, dim):
    IMAGE_WIDTH, IMAGE_HEIGHT = dim
    scale = np.array([IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_HEIGHT])
    loc, conf, landms = preds_ailia

    # decode boxes and landmarks relative to the prior (anchor) boxes
    priorbox = PriorBox(cfg, image_size=(IMAGE_HEIGHT, IMAGE_WIDTH))
    priors = priorbox.forward()
    boxes = rut.decode(np.squeeze(loc, axis=0), priors, cfg['variance'])
    boxes = boxes * scale
    scores = np.squeeze(conf, axis=0)[:, 1]
    landms = rut.decode_landm(np.squeeze(landms, axis=0), priors, cfg['variance'])
    scale1 = np.array([input_data.shape[3], input_data.shape[2],
                       input_data.shape[3], input_data.shape[2],
                       input_data.shape[3], input_data.shape[2],
                       input_data.shape[3], input_data.shape[2],
                       input_data.shape[3], input_data.shape[2]])
    landms = landms * scale1

    # ignore low scores
    inds = np.where(scores > CONFIDENCE_THRES)[0]
    boxes = boxes[inds]
    landms = landms[inds]
    scores = scores[inds]

    # keep top-K before NMS
    order = scores.argsort()[::-1][:TOP_K]
    boxes = boxes[order]
    landms = landms[order]
    scores = scores[order]

    # do NMS
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
    keep = rut.py_cpu_nms(dets, NMS_THRES)
    dets = dets[keep, :]
    landms = landms[keep]

    # keep top-K after NMS
    dets = dets[:KEEP_TOP_K, :]
    landms = landms[:KEEP_TOP_K, :]

    detections = np.concatenate((dets, landms), axis=1)
    return detections
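
# Each row of the detection array returned by postprocessing() has 15 columns:
#   [0:4]   bounding box (x1, y1, x2, y2)
#   [4]     detection score
#   [5:15]  five facial landmarks as (x, y) pairs, in template order:
#           left eye, right eye, nose, left mouth corner, right mouth corner
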
def img2tensor(imgs, bgr2rgb=True, float32=True):
    """Convert an HWC image to a CHW array, optionally swapping BGR to RGB."""
    if imgs.shape[2] == 3 and bgr2rgb:
        if imgs.dtype == 'float64':
            imgs = imgs.astype('float32')  # cv2.cvtColor does not accept float64
        imgs = cv2.cvtColor(imgs, cv2.COLOR_BGR2RGB)
    imgs = imgs.transpose(2, 0, 1)
    if float32:
        imgs = imgs.astype(np.float32)
    return imgs

def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
    """Convert a batched CHW array back to an HWC image."""
    result = []
    # take the first (and only) item of the batch
    _tensor = tensor[0]
    _tensor = _tensor.clip(min_max[0], min_max[1])
    _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
    n_dim = _tensor.ndim
    if n_dim == 3:
        img_np = _tensor
        img_np = img_np.transpose(1, 2, 0)
        if img_np.shape[2] == 1:  # gray image
            img_np = np.squeeze(img_np, axis=2)
        else:
            if rgb2bgr:
                img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
    elif n_dim == 2:
        img_np = _tensor
    else:
        raise TypeError(f'Only support 3D or 2D tensor, but received with dimension: {n_dim}')
    if out_type == np.uint8:
        # Unlike MATLAB, np.uint8() WILL NOT round by default.
        img_np = (img_np * 255.0).round()
    img_np = img_np.astype(out_type)
    result.append(img_np)
    if len(result) == 1:
        result = result[0]
    return result

def is_gray(img, threshold=10):
    """Heuristically decide whether a 3-channel image is actually grayscale."""
    if img.shape[2] == 1:
        return True
    # cast to a signed type so channel differences do not wrap around for uint8
    img1 = img[:, :, 0].astype(np.int16)
    img2 = img[:, :, 1].astype(np.int16)
    img3 = img[:, :, 2].astype(np.int16)
    diff1 = (img1 - img2).var()
    diff2 = (img2 - img3).var()
    diff3 = (img3 - img1).var()
    diff_sum = (diff1 + diff2 + diff3) / 3.0
    return diff_sum <= threshold

def bgr2gray(img, out_channel=3):
    # ITU-R BT.601 luma weights
    b, g, r = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
    if out_channel == 3:
        gray = gray[:, :, np.newaxis].repeat(3, axis=2)
    return gray
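
# `calc_mean_std` is used by `adain_npy` below but is not defined in this file;
# it presumably comes from one of the path/wildcard imports above. As a
# fallback, a minimal sketch (an assumption, shaped to match the H x W x C
# images that `adain_npy` receives here) is defined only if it is missing:
if 'calc_mean_std' not in globals():
    def calc_mean_std(feat, eps=1e-5):
        """Per-channel mean and std over the spatial dimensions."""
        c = feat.shape[2]
        feat_flat = feat.reshape(-1, c).astype(np.float64)
        feat_mean = feat_flat.mean(axis=0).reshape(1, 1, c)
        feat_std = np.sqrt(feat_flat.var(axis=0) + eps).reshape(1, 1, c)
        return feat_mean, feat_std
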
def adain_npy(content_feat, style_feat):
    """Adaptive instance normalization for numpy.

    Args:
        content_feat (np.ndarray): The input feature.
        style_feat (np.ndarray): The reference feature.
    """
    size = content_feat.shape
    style_mean, style_std = calc_mean_std(style_feat)
    content_mean, content_std = calc_mean_std(content_feat)
    normalized_feat = (content_feat - np.broadcast_to(content_mean, size)) / np.broadcast_to(content_std, size)
    return normalized_feat * np.broadcast_to(style_std, size) + np.broadcast_to(style_mean, size)
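
# Note: in this pipeline adain_npy() acts as a simple global color transfer:
# add_restored_face() below calls adain_npy(restored_face, input_face) to pull
# the restored face's per-channel statistics back toward the original crop.
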
class FaceRestoreHelper(object):
    """Helper for the face restoration pipeline (base class)."""

    def __init__(self,
                 upscale_factor,
                 face_size=512,
                 crop_ratio=(1, 1),
                 det_model='retinaface_resnet50',
                 save_ext='png',
                 template_3points=False,
                 pad_blur=False,
                 use_parse=False,
                 args=None):
        self.template_3points = template_3points  # improves robustness
        self.upscale_factor = int(upscale_factor)
        # the cropped face ratio based on the square face
        self.crop_ratio = crop_ratio  # (h, w)
        assert (self.crop_ratio[0] >= 1 and self.crop_ratio[1] >= 1), 'crop ratio only supports >=1'
        self.face_size = (int(face_size * self.crop_ratio[1]), int(face_size * self.crop_ratio[0]))
        self.det_model = det_model

        if self.template_3points:
            self.face_template = np.array([[192, 240], [319, 240], [257, 371]])
        else:
            # standard 5 landmarks for FFHQ faces with 512 x 512 (from facexlib)
            self.face_template = np.array([[192.98138, 239.94708], [318.90277, 240.1936], [256.63416, 314.01935],
                                           [201.26117, 371.41043], [313.08905, 371.15118]])
            # self.face_template = np.array([[193.65928, 242.98541], [318.32558, 243.06108], [255.67984, 328.82894],
            #                                [198.22603, 372.82502], [313.91018, 372.75659]])
        self.face_template = self.face_template * (face_size / 512.0)
        if self.crop_ratio[0] > 1:
            self.face_template[:, 1] += face_size * (self.crop_ratio[0] - 1) / 2
        if self.crop_ratio[1] > 1:
            self.face_template[:, 0] += face_size * (self.crop_ratio[1] - 1) / 2
        self.save_ext = save_ext
        self.pad_blur = pad_blur
        if self.pad_blur is True:
            self.template_3points = False

        self.all_landmarks_5 = []
        self.det_faces = []
        self.affine_matrices = []
        self.inverse_affine_matrices = []
        self.cropped_faces = []
        self.restored_faces = []
        self.pad_input_imgs = []

        self.use_parse = use_parse

        # initialize the face parsing and RetinaFace detection models (via ailia)
        if det_model == "retinaface_mobile0.25":
            self.cfg = rut.cfg_mnet
        elif det_model == "retinaface_resnet50":
            self.cfg = rut.cfg_re50
        mem_mode = ailia.get_memory_mode(reduce_constant=True, reuse_interstage=True)
        self.net = ailia.Net(None, "face_parse.onnx", env_id=args.env_id, memory_mode=mem_mode)
        self.retinaface = ailia.Net(None, det_model + ".onnx", env_id=args.env_id, memory_mode=mem_mode)

    def set_upscale_factor(self, upscale_factor):
        self.upscale_factor = upscale_factor

    def read_image(self, img):
        """img can be an image path or a cv2-loaded image."""
        if isinstance(img, str):
            img = cv2.imread(img)

        if np.max(img) > 256:  # 16-bit image
            img = img / 65535 * 255
        if len(img.shape) == 2:  # gray image
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif img.shape[2] == 4:  # BGRA image with alpha channel
            img = img[:, :, 0:3]

        self.input_img = img
        self.is_gray = is_gray(img, threshold=10)
        if self.is_gray:
            print('Grayscale input: True')

        # upscale small inputs so the shorter side is at least 512
        if min(self.input_img.shape[:2]) < 512:
            f = 512.0 / min(self.input_img.shape[:2])
            self.input_img = cv2.resize(self.input_img, (0, 0), fx=f, fy=f, interpolation=cv2.INTER_LINEAR)

    def get_face_landmarks_5(self,
                             only_keep_largest=False,
                             only_center_face=False,
                             resize=None,
                             blur_ratio=0.01,
                             eye_dist_threshold=None):
        if resize is None:
            scale = 1
            input_img = self.input_img
        else:
            h, w = self.input_img.shape[0:2]
            scale = resize / min(h, w)
            scale = max(1, scale)  # always scale up
            h, w = int(h * scale), int(w * scale)
            interp = cv2.INTER_AREA if scale < 1 else cv2.INTER_LINEAR
            input_img = cv2.resize(self.input_img, (w, h), interpolation=interp)

        # RetinaFace preprocessing: subtract the per-channel mean, HWC -> NCHW
        dim = (640, 640)
        img = input_img - (104, 117, 123)
        input_data = img.transpose(2, 0, 1)
        input_data.shape = (1,) + input_data.shape

        preds_ailia = self.retinaface.predict([input_data])
        detections = postprocessing(preds_ailia, input_data, self.cfg, dim)
        bboxes = detections
        if bboxes is None or bboxes.shape[0] == 0:
            return 0
        else:
            bboxes = bboxes / scale

        for bbox in bboxes:
            # remove faces with too small an eye distance: side faces or too small faces
            eye_dist = np.linalg.norm([bbox[6] - bbox[8], bbox[7] - bbox[9]])
            if eye_dist_threshold is not None and (eye_dist < eye_dist_threshold):
                continue
            if self.template_3points:
                landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 11, 2)])
            else:
                landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 15, 2)])
            self.all_landmarks_5.append(landmark)
            self.det_faces.append(bbox[0:5])

        if len(self.det_faces) == 0:
            return 0
        if only_keep_largest:
            h, w, _ = self.input_img.shape
            self.det_faces, largest_idx = get_largest_face(self.det_faces, h, w)
            self.all_landmarks_5 = [self.all_landmarks_5[largest_idx]]
        elif only_center_face:
            h, w, _ = self.input_img.shape
            self.det_faces, center_idx = get_center_face(self.det_faces, h, w)
            self.all_landmarks_5 = [self.all_landmarks_5[center_idx]]

        # pad blurry images
        if self.pad_blur:
            self.pad_input_imgs = []
            for landmarks in self.all_landmarks_5:
                # get landmarks
                eye_left = landmarks[0, :]
                eye_right = landmarks[1, :]
                eye_avg = (eye_left + eye_right) * 0.5
                mouth_avg = (landmarks[3, :] + landmarks[4, :]) * 0.5
                eye_to_eye = eye_right - eye_left
                eye_to_mouth = mouth_avg - eye_avg

                # Get the oriented crop rectangle
                # x: half width of the oriented crop rectangle
                x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
                # - np.flipud(eye_to_mouth) * [-1, 1]: rotate 90 degrees clockwise
                # norm with the hypotenuse: get the direction
                x /= np.hypot(*x)  # get the hypotenuse of a right triangle
                rect_scale = 1.5
                x *= max(np.hypot(*eye_to_eye) * 2.0 * rect_scale, np.hypot(*eye_to_mouth) * 1.8 * rect_scale)
                # y: half height of the oriented crop rectangle
                y = np.flipud(x) * [-1, 1]
                # c: center
                c = eye_avg + eye_to_mouth * 0.1
                # quad: (left_top, left_bottom, right_bottom, right_top)
                quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
                # qsize: side length of the square
                qsize = np.hypot(*x) * 2
                border = max(int(np.rint(qsize * 0.1)), 3)

                # get pad
                # pad: (width_left, height_top, width_right, height_bottom)
                pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
                       int(np.ceil(max(quad[:, 1]))))
                pad = [
                    max(-pad[0] + border, 1),
                    max(-pad[1] + border, 1),
                    max(pad[2] - self.input_img.shape[0] + border, 1),
                    max(pad[3] - self.input_img.shape[1] + border, 1)
                ]

                if max(pad) > 1:
                    # pad image
                    pad_img = np.pad(self.input_img, ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
                    # modify landmark coords
                    landmarks[:, 0] += pad[0]
                    landmarks[:, 1] += pad[1]
                    # blur pad images
                    h, w, _ = pad_img.shape
                    y, x, _ = np.ogrid[:h, :w, :1]
                    mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0],
                                                       np.float32(w - 1 - x) / pad[2]),
                                      1.0 - np.minimum(np.float32(y) / pad[1],
                                                       np.float32(h - 1 - y) / pad[3]))
                    blur = int(qsize * blur_ratio)
                    if blur % 2 == 0:
                        blur += 1  # box filter kernel size must be odd
                    blur_img = cv2.boxFilter(pad_img, 0, ksize=(blur, blur))
                    # blur_img = cv2.GaussianBlur(pad_img, (blur, blur), 0)

                    pad_img = pad_img.astype('float32')
                    pad_img += (blur_img - pad_img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
                    pad_img += (np.median(pad_img, axis=(0, 1)) - pad_img) * np.clip(mask, 0.0, 1.0)
                    pad_img = np.clip(pad_img, 0, 255)  # float32, [0, 255]
                    self.pad_input_imgs.append(pad_img)
                else:
                    self.pad_input_imgs.append(np.copy(self.input_img))

        return len(self.all_landmarks_5)

    def align_warp_face(self, save_cropped_path=None, border_mode='constant'):
        """Align and warp faces with the face template."""
        if self.pad_blur:
            assert len(self.pad_input_imgs) == len(
                self.all_landmarks_5), f'Mismatched samples: {len(self.pad_input_imgs)} and {len(self.all_landmarks_5)}'
        for idx, landmark in enumerate(self.all_landmarks_5):
            # use 5 landmarks to get affine matrix
            # use cv2.LMEDS method for the equivalence to skimage transform
            # ref: https://blog.csdn.net/yichxi/article/details/115827338
            affine_matrix = cv2.estimateAffinePartial2D(landmark, self.face_template, method=cv2.LMEDS)[0]
            self.affine_matrices.append(affine_matrix)
            # warp and crop faces
            if border_mode == 'constant':
                border_mode = cv2.BORDER_CONSTANT
            elif border_mode == 'reflect101':
                border_mode = cv2.BORDER_REFLECT101
            elif border_mode == 'reflect':
                border_mode = cv2.BORDER_REFLECT
            if self.pad_blur:
                input_img = self.pad_input_imgs[idx]
            else:
                input_img = self.input_img
            cropped_face = cv2.warpAffine(
                input_img, affine_matrix, self.face_size, borderMode=border_mode, borderValue=(135, 133, 132))  # gray
            self.cropped_faces.append(cropped_face)

    def get_inverse_affine(self, save_inverse_affine_path=None):
        """Get inverse affine matrix."""
        for idx, affine_matrix in enumerate(self.affine_matrices):
            inverse_affine = cv2.invertAffineTransform(affine_matrix)
            inverse_affine *= self.upscale_factor
            self.inverse_affine_matrices.append(inverse_affine)

    def add_restored_face(self, restored_face, input_face=None):
        if self.is_gray:
            restored_face = bgr2gray(restored_face)  # convert img into grayscale
            if input_face is not None:
                restored_face = adain_npy(restored_face, input_face)  # transfer the color
        self.restored_faces.append(restored_face)

    def paste_faces_to_input_image(self, save_path=None, upsample_img=None, draw_box=False, face_upsampler=None):
        h, w, _ = self.input_img.shape
        h_up, w_up = int(h * self.upscale_factor), int(w * self.upscale_factor)

        if upsample_img is None:
            # simply resize the background
            # upsample_img = cv2.resize(self.input_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)
            upsample_img = cv2.resize(self.input_img, (w_up, h_up), interpolation=cv2.INTER_LINEAR)
        else:
            upsample_img = cv2.resize(upsample_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)

        assert len(self.restored_faces) == len(
            self.inverse_affine_matrices), 'length of restored_faces and affine_matrices are different.'

        inv_mask_borders = []
        for restored_face, inverse_affine in zip(self.restored_faces, self.inverse_affine_matrices):
            if face_upsampler is not None:
                restored_face = face_upsampler.enhance(restored_face, outscale=self.upscale_factor)[0]
                inverse_affine /= self.upscale_factor
                inverse_affine[:, 2] *= self.upscale_factor
                face_size = (self.face_size[0] * self.upscale_factor, self.face_size[1] * self.upscale_factor)
            else:
                # add an offset to the inverse affine matrix for more precise back alignment
                if self.upscale_factor > 1:
                    extra_offset = 0.5 * self.upscale_factor
                else:
                    extra_offset = 0
                inverse_affine[:, 2] += extra_offset
                face_size = self.face_size
            inv_restored = cv2.warpAffine(restored_face, inverse_affine, (w_up, h_up))

            # always use a square mask
            mask = np.ones(face_size, dtype=np.float32)
            inv_mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up))
            # remove the black borders
            inv_mask_erosion = cv2.erode(
                inv_mask, np.ones((int(2 * self.upscale_factor), int(2 * self.upscale_factor)), np.uint8))
            pasted_face = inv_mask_erosion[:, :, None] * inv_restored
            total_face_area = np.sum(inv_mask_erosion)

            # add border
            if draw_box:
                h, w = face_size
                mask_border = np.ones((h, w, 3), dtype=np.float32)
                border = int(1400 / np.sqrt(total_face_area))
                mask_border[border:h - border, border:w - border, :] = 0
                inv_mask_border = cv2.warpAffine(mask_border, inverse_affine, (w_up, h_up))
                inv_mask_borders.append(inv_mask_border)

            # compute the fusion edge based on the area of the face
            w_edge = int(total_face_area**0.5) // 20
            erosion_radius = w_edge * 2
            inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8))
            blur_size = w_edge * 2
            inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0)
            if len(upsample_img.shape) == 2:  # upsample_img is a gray image
                upsample_img = upsample_img[:, :, None]
            inv_soft_mask = inv_soft_mask[:, :, None]

            # parse mask
            if self.use_parse:
                # inference
                face_input = cv2.resize(restored_face, (512, 512), interpolation=cv2.INTER_LINEAR)
                face_input = img2tensor(face_input.astype('float32') / 255., bgr2rgb=True, float32=True)
                # normalize to [-1, 1]
                mean = np.array([0.5, 0.5, 0.5])
                std = np.array([0.5, 0.5, 0.5])
                for i in range(3):
                    face_input[i, :, :] = (face_input[i, :, :] - mean[i]) / std[i]
                face_input = np.expand_dims(face_input, 0)
                out = self.net.run(face_input)[0]
                out = np.argmax(out, axis=1).squeeze()

                parse_mask = np.zeros(out.shape)
                MASK_COLORMAP = [0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 0, 0, 0]
                for idx, color in enumerate(MASK_COLORMAP):
                    parse_mask[out == idx] = color
                # blur the mask (applied twice for a softer boundary)
                parse_mask = cv2.GaussianBlur(parse_mask, (101, 101), 11)
                parse_mask = cv2.GaussianBlur(parse_mask, (101, 101), 11)
                # remove the black borders
                thres = 10
                parse_mask[:thres, :] = 0
                parse_mask[-thres:, :] = 0
                parse_mask[:, :thres] = 0
                parse_mask[:, -thres:] = 0
                parse_mask = parse_mask / 255.

                parse_mask = cv2.resize(parse_mask, face_size)
                parse_mask = cv2.warpAffine(parse_mask, inverse_affine, (w_up, h_up), flags=cv2.INTER_AREA)
                inv_soft_parse_mask = parse_mask[:, :, None]
                # pasted_face = inv_restored
                fuse_mask = (inv_soft_parse_mask < inv_soft_mask).astype('int')
                inv_soft_mask = inv_soft_parse_mask * fuse_mask + inv_soft_mask * (1 - fuse_mask)

            if len(upsample_img.shape) == 3 and upsample_img.shape[2] == 4:  # alpha channel
                alpha = upsample_img[:, :, 3:]
                upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img[:, :, 0:3]
                upsample_img = np.concatenate((upsample_img, alpha), axis=2)
            else:
                upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img

        if np.max(upsample_img) > 256:  # 16-bit image
            upsample_img = upsample_img.astype(np.uint16)
        else:
            upsample_img = upsample_img.astype(np.uint8)

        # draw bounding boxes
        if draw_box:
            # upsample_input_img = cv2.resize(input_img, (w_up, h_up))
            img_color = np.ones([*upsample_img.shape], dtype=np.float32)
            img_color[:, :, 0] = 0
            img_color[:, :, 1] = 255
            img_color[:, :, 2] = 0
            for inv_mask_border in inv_mask_borders:
                upsample_img = inv_mask_border * img_color + (1 - inv_mask_border) * upsample_img

        return upsample_img

    def parameter_reinit(self):
        self.all_landmarks_5 = []
        self.det_faces = []
        self.affine_matrices = []
        self.inverse_affine_matrices = []
        self.cropped_faces = []
        self.restored_faces = []
        self.pad_input_imgs = []
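
# Typical usage (a sketch; `args` with an `env_id` attribute and a `restore`
# callable that runs the actual restoration model are assumptions, not part of
# this module):
#
#     helper = FaceRestoreHelper(upscale_factor=2, use_parse=True, args=args)
#     helper.read_image('input.png')
#     num_faces = helper.get_face_landmarks_5(only_center_face=False)
#     helper.align_warp_face()
#     for cropped_face in helper.cropped_faces:
#         restored_face = restore(cropped_face)  # hypothetical model call
#         helper.add_restored_face(restored_face, cropped_face)
#     helper.get_inverse_affine()
#     result = helper.paste_faces_to_input_image()
#     helper.parameter_reinit()  # reset state before processing the next image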