
Commit 6876e6b ("reformating")
1 parent: c0eb780

8 files changed: +20 / -111 lines

cropper.py
Lines changed: 9 additions & 82 deletions

@@ -1,129 +1,56 @@
 #!/usr/bin/python
-# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
-#
-# This example shows how to use dlib's face recognition tool for image alignment.
-#
-# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
-#   You can install dlib using the command:
-#       pip install dlib
-#
-#   Alternatively, if you want to compile dlib yourself then go into the dlib
-#   root folder and run:
-#       python setup.py install
-#
-#   Compiling dlib should work on any operating system so long as you have
-#   CMake installed. On Ubuntu, this can be done easily by running the
-#   command:
-#       sudo apt-get install cmake
-#
-#   Also note that this example requires Numpy which can be installed
-#   via the command:
-#       pip install numpy
-
-import sys
 
 import dlib, cv2
 import numpy as np
 
-'''if len(sys.argv) != 3:
-    print(
-        "Call this program like this:\n"
-        "   ./face_alignment.py shape_predictor_5_face_landmarks.dat ../examples/faces/bald_guys.jpg\n"
-        "You can download a trained facial shape predictor from:\n"
-        "    http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n")
-    exit()'''
+predictor_path = 'models/shape_predictor_68_face_landmarks.dat'
 
-predictor_path = 'resources/shape_predictor_68_face_landmarks.dat'
-# face_file_path = 'dbs/lfw-deepfunneled/Aaron_Eckhart/Aaron_Eckhart_0001.jpg'
-
-# Load all the models we need: a detector to find the faces, a shape predictor
-# to find face landmarks so we can precisely localize the face
 detector = dlib.get_frontal_face_detector()
 sp = dlib.shape_predictor(predictor_path)
 
 
 def crop(face_file_path):
-    # Load the image using Dlib
-    # img = dlib.load_rgb_image(face_file_path)
     img = cv2.imread(face_file_path)
 
-    # Ask the detector to find the bounding boxes of each face. The 1 in the
-    # second argument indicates that we should upsample the image 1 time. This
-    # will make everything bigger and allow us to detect more faces.
     dets = detector(img, 1)
 
     num_faces = len(dets)
     if num_faces == 0:
         print("Sorry, there were no faces found in '{}'".format(face_file_path))
         return False, None
 
-    # Find the 5 face landmarks we need to do the alignment.
     faces = dlib.full_object_detections()
     for detection in dets:
         faces.append(sp(img, detection))
 
-    # window = dlib.image_window()
-
-    # Get the aligned face images
-    # Optionally:
-    # images = dlib.get_face_chips(img, faces, size=160, padding=0.25)
     images = dlib.get_face_chips(img, faces, size=160)
-    # window.set_image(image)
 
-    # TODO we can choose faces
-    # cv2.imshow("img", images[0])
-    # cv2.waitKey()
-    # cv2.imwrite(output_path, images[0])
     ret = cv2.cvtColor(images[0], cv2.COLOR_BGR2RGB)
     return img, cv2.imencode('.jpg', img)[1]
 
 
-def cropFileStorageObject(fileStorage):
-    # Load the image using Dlib
-    # img = dlib.load_rgb_image(face_file_path)
-    # read image file string data
-    filestr = fileStorage.read()
-    # convert string data to numpy array
+def cropFileStorageObject(file_storage):
+    filestr = file_storage.read()
     npimg = np.fromstring(filestr, np.uint8)
-    # convert numpy array to image
     img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
 
-    # Ask the detector to find the bounding boxes of each face. The 1 in the
-    # second argument indicates that we should upsample the image 1 time. This
-    # will make everything bigger and allow us to detect more faces.
     dets = detector(img, 1)
 
     num_faces = len(dets)
     if num_faces == 0:
-        print("Sorry, there were no faces found in '{}'".format(fileStorage))
+        print("Sorry, there were no faces found in '{}'".format(file_storage))
         return False, None
 
-    # Find the 5 face landmarks we need to do the alignment.
     faces = dlib.full_object_detections()
     for detection in dets:
         faces.append(sp(img, detection))
 
-    # window = dlib.image_window()
-
-    # Get the aligned face images
-    # Optionally:
-    # images = dlib.get_face_chips(img, faces, size=160, padding=0.25)
-    images = dlib.get_face_chips(img, faces, size=160)
-    # window.set_image(image)
+    # images = dlib.get_face_chips(img, faces, size=160)
 
     # TODO we can choose faces
-    # cv2.imshow("img", images[0])
-    # cv2.waitKey()
-    # cv2.imwrite(output_path, images[0])
-    ret = cv2.cvtColor(images[0], cv2.COLOR_BGR2RGB)
+    # ret = cv2.cvtColor(images[0], cv2.COLOR_BGR2RGB)
     return img, cv2.imencode('.jpg', img)[1]
 
-
-if __name__ == '__main__':
-    pass
-    # crop(face_file_path)
-
-# It is also possible to get a single chip
-# image = dlib.get_face_chip(img, faces[0])
-# window.set_image(image)
-# dlib.hit_enter_to_continue()
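
Note that after this commit cropFileStorageObject no longer produces aligned face chips: the dlib.get_face_chips and cv2.cvtColor calls are commented out, so the function only confirms that a face is present and then returns the original decoded frame re-encoded as JPEG. If the aligned-chip behaviour were wanted again, a sketch along the lines below could restore it. This is not code from the repository: the helper name crop_file_storage_aligned is hypothetical, it would reuse the module-level detector and sp defined above, and np.frombuffer stands in for np.fromstring, which NumPy has deprecated.

    import dlib, cv2
    import numpy as np

    def crop_file_storage_aligned(file_storage, detector, sp, size=160):
        """Decode an uploaded image, align the first detected face, and
        return the aligned chip plus a JPEG-encoded buffer of it."""
        npimg = np.frombuffer(file_storage.read(), np.uint8)
        img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)

        dets = detector(img, 1)  # upsample once to catch smaller faces
        if len(dets) == 0:
            return False, None

        faces = dlib.full_object_detections()
        for detection in dets:
            faces.append(sp(img, detection))

        # 160x160 aligned face chips, as in crop() above
        chips = dlib.get_face_chips(img, faces, size=size)
        chip_rgb = cv2.cvtColor(chips[0], cv2.COLOR_BGR2RGB)
        return chip_rgb, cv2.imencode('.jpg', chips[0])[1]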

main.py
Lines changed: 10 additions & 9 deletions

@@ -1,15 +1,10 @@
 from flask import Flask, render_template, request
-from werkzeug.utils import secure_filename
-import os
-import random
-import string
 import predict
 import cropper
 import time
 import base64
 
 app = Flask(__name__)
-output_path = 'temp/'
 
 
 @app.route('/')
@@ -21,20 +16,26 @@ def main():
 def upload_file():
     return render_template('upload.html')
 
+
+"""
 @app.route('/src/<folder>/<img>')
 def serve(folder, img):
     app.send_file('src/' + folder + '/' + img)
+"""
+
 
-@app.route('/uploader', methods=['GET', 'POST'])
+@app.route('/uploader', methods=['POST'])
 def file_uploaded():
     if request.method == 'POST':
         f = request.files['file']
         start = time.time()
         img, base64compat = cropper.cropFileStorageObject(f)
         labels = predict.predictFromTensor(img)
-    else: raise TypeError
-
-    return render_template('results.html', labels=labels, base64=base64.b64encode(base64compat).decode(), time=time.time()-start)
+    else:
+        raise TypeError
+
+    return render_template('results.html', labels=labels, base64=base64.b64encode(base64compat).decode(),
+                           time=time.time() - start)
 
 
 if __name__ == '__main__':
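
The /uploader route now accepts only POST; with methods=['POST'], Flask answers other verbs with 405 before the view runs, so the else: raise TypeError branch is effectively unreachable. A minimal way to exercise the endpoint from a client is sketched below, under the assumption that the app is running locally on Flask's default port 5000 and that the multipart field is named file (matching request.files['file'] above); the test image path is hypothetical.

    import requests

    # Post an image as a multipart form field named 'file'.
    with open('some_face.jpg', 'rb') as fh:
        resp = requests.post('http://127.0.0.1:5000/uploader',
                             files={'file': fh})

    print(resp.status_code)   # 200 on success
    print(resp.text[:200])    # start of the rendered results.html page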
File renamed without changes.

predict.py
Lines changed: 1 addition & 20 deletions

@@ -1,23 +1,11 @@
 import tensorflow as tf
 import numpy as np
-# import time
-# from tensorflow.keras.preprocessing.image import ImageDataGenerator
 import json, os
 
-"""
-testdatagen = ImageDataGenerator(rescale=1./255)
-
-# test_it = testdatagen.flow_from_directory('dbs/test', shuffle=False, target_size=(80, 80), class_mode='binary')
-# print(test_it.next()[0].shape)
-test_it = testdatagen.flow_from_directory('dbs/lfw-cropped', shuffle=False, target_size=(80, 80), class_mode='binary')
-
-labels = dict((v,k) for k,v in test_it.class_indices.items())
-"""
 models = {
     "id": tf.keras.models.load_model("models/id/idk-id.h5"),
     "gen": tf.keras.models.load_model("models/gen/15-altb-gen.h5"),
     "age": tf.keras.models.load_model("models/age/idk-long-age.h5")
-
 }
 
 
@@ -29,9 +17,8 @@ def get_indices(name):
 
 def get_label(name, model, img):
     pred = model.predict_on_batch(tf.expand_dims(img, 0))
-    # print(get_indices("id")[str(np.argmax(pred))])
     if name in ("id", "age"):
-        return get_indices(name)[str(np.argmax(pred))], 0  # pred[0][np.argmax(pred)].numpy()
+        return get_indices(name)[str(np.argmax(pred))], 0
     elif name == "gen":
         return get_indices("gen")[str(int(round(pred[0][0].numpy())))], pred[0][0].numpy() if not int(
             round(pred[0][0].numpy())) else 100 - pred[0][0].numpy()
@@ -60,11 +47,8 @@ def predictFromPath(img_path="dbs/test/Ariel_Sharon_0006.jpg"):
         }
     }
 
-    # print(image.shape)
-
     for mod in models.keys():
         labels = get_label(mod, models[mod], image)
-        # print(labels[0], f"{labels[1] * 100}%")
         returns[mod]["class"], returns[mod]["confidence"] = labels[0], f"{labels[1] * 100}%"
 
     return returns, img_path
@@ -89,11 +73,8 @@ def predictFromTensor(tensor):
         }
     }
 
-    # print(image.shape)
-
     for mod in models.keys():
         labels = get_label(mod, models[mod], image)
-        # print(labels[0], f"{labels[1] * 100}%")
         returns[mod]["class"], returns[mod]["confidence"] = labels[0], f"{labels[1] * 100}%"
 
     return returns
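
For reference, the trimmed prediction flow above returns, for each model key ("id", "gen", "age"), an entry holding a "class" label and a "confidence" string built from f"{labels[1] * 100}%". The sketch below shows one plausible way to call it, mirroring how main.py feeds the OpenCV-decoded image straight into predictFromTensor; it assumes the .h5 models and their index files exist under models/, that the script runs from the repository root, and the sample image path is just the default used by predictFromPath.

    import cv2
    import predict  # loads the three Keras models at import time

    # Decode an image the same way main.py does (a BGR uint8 array from OpenCV),
    # then run all three models on it.
    img = cv2.imread("dbs/test/Ariel_Sharon_0006.jpg")
    results = predict.predictFromTensor(img)

    # results is keyed by model name; each entry has "class" and "confidence".
    for name, result in results.items():
        print(name, result["class"], result["confidence"])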

Binary files (no diff shown):

static/JP.png (103 KB)
static/Pascal.png (2.98 KB)
static/happyPerson.png (316 KB)
static/loading.gif (520 KB)
