#!/usr/bin/python
-# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
-#
-#   This example shows how to use dlib's face recognition tool for image alignment.
-#
-# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
-#   You can install dlib using the command:
-#       pip install dlib
-#
-#   Alternatively, if you want to compile dlib yourself then go into the dlib
-#   root folder and run:
-#       python setup.py install
-#
-#   Compiling dlib should work on any operating system so long as you have
-#   CMake installed. On Ubuntu, this can be done easily by running the
-#   command:
-#       sudo apt-get install cmake
-#
-#   Also note that this example requires Numpy which can be installed
-#   via the command:
-#       pip install numpy
-
-import sys

import dlib, cv2
import numpy as np

-'''if len(sys.argv) != 3:
-    print(
-        "Call this program like this:\n"
-        "   ./face_alignment.py shape_predictor_5_face_landmarks.dat ../examples/faces/bald_guys.jpg\n"
-        "You can download a trained facial shape predictor from:\n"
-        "    http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n")
-    exit()'''
+predictor_path = 'models/shape_predictor_68_face_landmarks.dat'

-predictor_path = 'resources/shape_predictor_68_face_landmarks.dat'
-# face_file_path = 'dbs/lfw-deepfunneled/Aaron_Eckhart/Aaron_Eckhart_0001.jpg'
-
-# Load all the models we need: a detector to find the faces, a shape predictor
-# to find face landmarks so we can precisely localize the face
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)


def crop(face_file_path):
-    # Load the image using Dlib
-    # img = dlib.load_rgb_image(face_file_path)
    img = cv2.imread(face_file_path)

-    # Ask the detector to find the bounding boxes of each face. The 1 in the
-    # second argument indicates that we should upsample the image 1 time. This
-    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)

    num_faces = len(dets)
    if num_faces == 0:
        print("Sorry, there were no faces found in '{}'".format(face_file_path))
        return False, None

-    # Find the 5 face landmarks we need to do the alignment.
    faces = dlib.full_object_detections()
    for detection in dets:
        faces.append(sp(img, detection))

-    # window = dlib.image_window()
-
-    # Get the aligned face images
-    # Optionally:
-    # images = dlib.get_face_chips(img, faces, size=160, padding=0.25)
    images = dlib.get_face_chips(img, faces, size=160)
-    # window.set_image(image)

-    # TODO we can choose faces
-    # cv2.imshow("img", images[0])
-    # cv2.waitKey()
-    # cv2.imwrite(output_path, images[0])
    ret = cv2.cvtColor(images[0], cv2.COLOR_BGR2RGB)
    return img, cv2.imencode('.jpg', img)[1]

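For reference, here is a minimal sketch of how crop() could be called; it is not part of the commit, and the input path and output filename are invented for illustration. Note that as committed the function encodes img, the full original frame, rather than the aligned chip stored in ret, so the returned JPEG buffer contains the whole image.

# Hypothetical caller for crop(); the path and output name are illustrative only.
image, jpg_buf = crop('faces/sample.jpg')
if image is not False:
    # jpg_buf is the ndarray produced by cv2.imencode; tobytes() yields raw JPEG bytes.
    with open('sample_out.jpg', 'wb') as f:
        f.write(jpg_buf.tobytes())
else:
    print('No face detected, nothing written.')
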
-def cropFileStorageObject(fileStorage):
-    # Load the image using Dlib
-    # img = dlib.load_rgb_image(face_file_path)
-    # read image file string data
-    filestr = fileStorage.read()
-    # convert string data to numpy array
+def cropFileStorageObject(file_storage):
+
+    filestr = file_storage.read()
+
    npimg = np.fromstring(filestr, np.uint8)
-    # convert numpy array to image
+
    img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)

-    # Ask the detector to find the bounding boxes of each face. The 1 in the
-    # second argument indicates that we should upsample the image 1 time. This
-    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)

    num_faces = len(dets)
    if num_faces == 0:
-        print("Sorry, there were no faces found in '{}'".format(fileStorage))
+        print("Sorry, there were no faces found in '{}'".format(file_storage))
        return False, None

-    # Find the 5 face landmarks we need to do the alignment.
    faces = dlib.full_object_detections()
    for detection in dets:
        faces.append(sp(img, detection))

-    # window = dlib.image_window()
-
-    # Get the aligned face images
-    # Optionally:
-    # images = dlib.get_face_chips(img, faces, size=160, padding=0.25)
-    images = dlib.get_face_chips(img, faces, size=160)
-    # window.set_image(image)
+    # images = dlib.get_face_chips(img, faces, size=160)

    # TODO we can choose faces
-    # cv2.imshow("img", images[0])
-    # cv2.waitKey()
-    # cv2.imwrite(output_path, images[0])
-    ret = cv2.cvtColor(images[0], cv2.COLOR_BGR2RGB)
+    # ret = cv2.cvtColor(images[0], cv2.COLOR_BGR2RGB)
    return img, cv2.imencode('.jpg', img)[1]

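One caveat on the unchanged np.fromstring(filestr, np.uint8) line above: passing raw bytes to np.fromstring is deprecated in current NumPy releases, and np.frombuffer is the drop-in replacement for this use. A sketch of the equivalent decode step, outside the commit:

# Equivalent decode with the non-deprecated NumPy API (illustrative, not in the commit).
npimg = np.frombuffer(filestr, dtype=np.uint8)
img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
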
-
-if __name__ == '__main__':
-    pass
-    # crop(face_file_path)
-
-# It is also possible to get a single chip
-# image = dlib.get_face_chip(img, faces[0])
-# window.set_image(image)
-# dlib.hit_enter_to_continue()
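The FileStorage naming suggests cropFileStorageObject() is meant to receive an uploaded file, for example a werkzeug FileStorage pulled from a Flask request; that framing is an assumption rather than something the commit states. A minimal sketch of such a caller, with the app, route and form field name invented for illustration:

# Hypothetical Flask endpoint; app setup, route and form field name are assumptions,
# and cropFileStorageObject is the function defined in the module above.
from flask import Flask, request, Response

app = Flask(__name__)

@app.route('/crop', methods=['POST'])
def crop_endpoint():
    file_storage = request.files['image']   # werkzeug FileStorage from the upload form
    img, jpg_buf = cropFileStorageObject(file_storage)
    if img is False:
        return Response('no face found', status=422)
    # jpg_buf is the ndarray from cv2.imencode; tobytes() gives the raw JPEG payload.
    return Response(jpg_buf.tobytes(), mimetype='image/jpeg')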