
Commit 5f5a609

cleanup
1 parent 700be8d commit 5f5a609

File tree

4 files changed: +2 additions, -118 deletions


mitdeeplearning/data/test_faces.h5py

-724 KB
Binary file not shown.

mitdeeplearning/lab2.py

Lines changed: 0 additions & 115 deletions
@@ -105,118 +105,3 @@ def get_test_faces():
             images[key].append(image)
 
     return images["LF"], images["LM"], images["DF"], images["DM"]
-
-
-class PPBFaceEvaluator:
-    ''' Evaluate on the PPB (Pilot Parliaments Benchmark) dataset. '''
-
-    def __init__(self, skip=4):
-        path_to_faces = tf.keras.utils.get_file('ppb', 'https://www.dropbox.com/s/l0lp6qxeplumouf/PPB.tar?dl=1', extract=True)
-        self.ppb_root = os.path.join(os.path.split(path_to_faces)[0], 'PPB-2017')
-
-        ppb_anno = os.path.join(self.ppb_root, 'PPB-2017-metadata.csv')
-
-        # Parse the metadata file: one (gender, skin tone) annotation per image filename.
-        self.anno_dict = {}
-        with open(ppb_anno) as f:
-            for line in f.read().split('\r'):
-                ind, name, gender, numeric, skin, country = line.split(',')
-                self.anno_dict[name] = (gender.lower(), skin.lower())
-
-        image_dir = os.path.join(self.ppb_root, "imgs")
-        image_files = sorted(os.listdir(image_dir))[::skip]  # sample every `skip` images to keep lab runtime down
-
-        self.raw_images = {
-            'male_darker': [],
-            'male_lighter': [],
-            'female_darker': [],
-            'female_lighter': [],
-        }
-
-        for filename in image_files:
-            if not filename.endswith(".jpg"):
-                continue
-            image = cv2.imread(os.path.join(image_dir, filename))[:, :, ::-1]  # BGR -> RGB
-            gender, skin = self.anno_dict[filename]
-            self.raw_images[gender + '_' + skin].append(image)
-
-    def get_sample_faces_from_demographic(self, gender, skin_color):
-        key = self.__get_key(gender, skin_color)
-        data = self.raw_images[key][50] / 255.
-        return data
-
-    def evaluate(self, models_to_test, gender, skin_color, output_idx=None, from_logit=False, patch_stride=0.2, patch_depth=5):
-        correct_predictions = [0.0] * len(models_to_test)
-
-        key = self.__get_key(gender, skin_color)
-        num_faces = len(self.raw_images[key])
-
-        import progressbar
-        bar = progressbar.ProgressBar()
-        for face_idx in bar(range(num_faces)):
-            image = self.raw_images[key][face_idx]
-            height, width, _ = image.shape
-
-            patches, bboxes = slide_square(image, patch_stride, width / 2, width, patch_depth)
-            patches = tf.cast(tf.constant(patches, dtype=tf.uint8), tf.float32) / 255.
-
-            for model_idx, model in enumerate(models_to_test):
-                out = model(patches)
-                y = out if output_idx is None else out[output_idx]
-                y = y.numpy()
-                y_inds = np.argsort(y.flatten())
-                most_likely_prob = y[y_inds[-1]]
-                # Count the face as detected if the highest-scoring patch is positive.
-                if (from_logit and most_likely_prob >= 0.0) or \
-                        (not from_logit and most_likely_prob >= 0.5):
-                    correct_predictions[model_idx] += 1
-
-        accuracy = [correct_predictions[i] / num_faces for i, _ in enumerate(models_to_test)]
-        return accuracy
-
-    def __get_key(self, gender, skin_color):
-        gender = gender.lower()
-        skin_color = skin_color.lower()
-        assert gender in ['male', 'female']
-        assert skin_color in ['lighter', 'darker']
-        return '{}_{}'.format(gender, skin_color)
-
-
-''' Slide a square window across an image and extract the square regions.
-    img = the image
-    stride = (0, 1], the fraction of the window dimension to slide between crops
-    min_size = minimum square size
-    max_size = maximum square size
-    n = number of different sizes, including min_size and max_size '''
-def slide_square(img, stride, min_size, max_size, n):
-    img_h, img_w = img.shape[:2]
-
-    square_sizes = np.linspace(min_size, max_size, n, dtype=np.int32)
-    square_images = []
-    square_bbox = []  # list of bounding boxes ((i1, j1), (i2, j2)): top-left and bottom-right corners
-
-    # for each of the square sizes
-    for level, sq_dim in enumerate(square_sizes):
-        stride_length = int(stride * sq_dim)
-        stride_start_i = range(0, int(img_h - sq_dim + 1), stride_length)
-        stride_start_j = range(0, int(img_w - sq_dim + 1), stride_length)
-        for i in stride_start_i:
-            for j in stride_start_j:
-                square_top_left = (i, j)
-                square_bottom_right = (i + sq_dim, j + sq_dim)
-                square_corners = (square_top_left, square_bottom_right)
-                square_image = img[i:i + sq_dim, j:j + sq_dim]
-                square_resize = cv2.resize(square_image, IM_SHAPE[:2], interpolation=cv2.INTER_NEAREST)
-                # append to the lists of crops and bounding boxes
-                square_images.append(square_resize)
-                square_bbox.append(square_corners)
-
-    return square_images, square_bbox
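
For context, the class and helper removed above were the lab's tool for measuring face-detection accuracy across PPB demographic groups. Below is a minimal usage sketch, not part of this commit: it assumes an earlier release (e.g. 0.5.4) that still ships PPBFaceEvaluator, and standard_classifier / debiased_classifier are placeholder tf.keras models returning one detection score per image patch.

# Hypothetical usage sketch -- requires mitdeeplearning <= 0.5.4, where lab2 still includes PPBFaceEvaluator.
from mitdeeplearning.lab2 import PPBFaceEvaluator

ppb = PPBFaceEvaluator(skip=4)  # downloads PPB-2017 and buckets images by gender/skin-tone key

# Placeholder models: each maps a batch of face patches to one detection score per patch.
# Scores are assumed to be logits here, hence from_logit=True below.
models_to_test = [standard_classifier, debiased_classifier]

for gender in ["male", "female"]:
    for skin_color in ["lighter", "darker"]:
        accuracy = ppb.evaluate(models_to_test, gender, skin_color, from_logit=True)
        print(gender, skin_color, accuracy)  # one accuracy value per model in models_to_test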

mitdeeplearning/util.py

Lines changed: 0 additions & 1 deletion
@@ -1,7 +1,6 @@
 import matplotlib.pyplot as plt
 import tensorflow as tf
 import time
-import progressbar
 import numpy as np
 
 from IPython import display as ipythondisplay

setup.py

Lines changed: 2 additions & 2 deletions
@@ -22,13 +22,13 @@ def get_dist(pkgname):
 setup(
   name = 'mitdeeplearning', # How you named your package folder (MyLib)
   packages = ['mitdeeplearning'], # Chose the same as "name"
-  version = '0.5.4', # Start with a small number and increase it with every change you make
+  version = '0.5.5', # Start with a small number and increase it with every change you make
   license='MIT', # Chose a license from here: https://help.github.com/articles/licensing-a-repository
   description = 'Official software labs for MIT Introduction to Deep Learning (http://introtodeeplearning.com)', # Give a short description about your library
   author = 'Alexander Amini', # Type in your name
   author_email = '[email protected]', # Type in your E-Mail
   url = 'http://introtodeeplearning.com', # Provide either the link to your github or to your website
-  download_url = 'https://github.com/aamini/introtodeeplearning_labs/archive/v0.5.4.tar.gz', # I explain this later on
+  download_url = 'https://github.com/aamini/introtodeeplearning_labs/archive/v0.5.5.tar.gz', # I explain this later on
   keywords = ['deep learning', 'neural networks', 'tensorflow', 'introduction'], # Keywords that define your package best
   install_requires=install_deps,
   classifiers=[
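
A quick sanity check after the version bump (not part of the commit): once the new release is built and installed, the package metadata should report 0.5.5. A minimal sketch, assuming Python 3.8+ where importlib.metadata is in the standard library:

from importlib.metadata import version

print(version("mitdeeplearning"))  # expected to print '0.5.5' once this release is installed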

0 commit comments
