Skip to content

Commit 6fa0578

Browse files
committed
Add webcam
1 parent 01c9ead commit 6fa0578

File tree

3 files changed

+194
-29
lines changed

3 files changed

+194
-29
lines changed

README.md

Lines changed: 23 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -187,7 +187,7 @@ There are three training steps involved.
187187

188188
1. Five random seeds on 5-fold cross-validation on the `Adience` dataset.
189189

190-
Since the reported metrics (i.e. accuracy) is 5-fold cross-validation, we will do the same here. In order to get the least biased numbers, we run this five times each with a different seed. This means that we are training in total of 25 times and report the average of the 25 numbers. Configure the values in `cross-val.json` and run `python cross-val.py`.
190+
Since the reported metrics (i.e., accuracy) is 5-fold cross-validation, we will do the same here. In order to get the least biased numbers, we run this five times each with a different seed. This means that we are training in total of 25 times and report the average of the 25 numbers. Configure the values in `cross-val.json` and run `python cross-val.py`.
191191

192192
## [Evaluation results](training-results/TRAINING-RESULTS.md)
193193

@@ -257,9 +257,29 @@ Check out [this demo video](https://youtu.be/Dna_Hp-s78I).
257257
python3 app.py
258258
```
259259

260-
After running the container (i.e. `docker run -it --rm -p 10003:10003 tae898/age-gender`), you can run `client.py` (e.g. `python client.py --image-path test-images/matrix-tae-final_exported_37233.jpg`) to get estimated genders and ages in the picture.
260+
### Running a client
261261

262-
NB: You also have to run the face-detection-recognition (`docker run -it --rm -p 10002:10002 tae898/face-detection-recognition` for CPU or `docker run --gpus all -it --rm -p 10002:10002 tae898/face-detection-recognition-cuda` for cuda), before running `client.py`. This separation might be annoying but the modularization will help in the future.
262+
First install the requirements by running `pip install -r requirements-client.txt`, and then run the two containers:
263+
264+
1. `docker run -it --rm -p 10002:10002 tae898/face-detection-recognition` for CPU or `docker run --gpus all -it --rm -p 10002:10002 tae898/face-detection-recognition-cuda` for cuda.
265+
1. `docker run -it --rm -p 10003:10003 tae898/age-gender` for CPU or `docker run -it --rm -p 10003:10003 --gpus all tae898/age-gender-cuda` for cuda.
266+
267+
Now that the two containers are running, you can run `client.py`. There are two options to run the client.
268+
269+
```sh
270+
usage: client.py [-h] [--url-face URL_FACE] [--url-age-gender URL_AGE_GENDER]
271+
[--image-path IMAGE_PATH] [--camera-id CAMERA_ID]
272+
[--mode MODE]
273+
```
274+
275+
1. If you have an image stored on disk and want to run the models on this image, then do something like:
276+
```sh
277+
python client.py --mode image --image-path test-images/gettyimages-1067881118-2048x2048.jpg
278+
```
279+
1. If you want to run the models on your webcam video, then do something like:
280+
```sh
281+
python client.py --mode webcam
282+
```
263283

264284
## Troubleshooting
265285

client.py

Lines changed: 159 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -18,17 +18,20 @@
1818
)
1919

2020

21-
def main(url_face: str, url_age_gender: str, image_path: str):
21+
def send_to_servers(binary_image, url_face: str, url_age_gender: str) -> None:
22+
"""Send a binary image to the two servers.
2223
23-
logging.debug(f"loading image ...")
24-
if isinstance(image_path, str):
25-
with open(image_path, "rb") as stream:
26-
binary_image = stream.read()
27-
elif isinstance(image_path, Image.Image):
28-
binary_image = io.BytesIO()
29-
image_path.save(binary_image, format="JPEG")
30-
binary_image = binary_image.getvalue()
24+
Args
25+
----
26+
binary_image: binary image
27+
url_face: url of the face-detection-recognition server
28+
url_age_gender: url of the age-gender server.
3129
30+
Returns
31+
-------
32+
genders, ages, bboxes, det_scores, landmarks, embeddings
33+
34+
"""
3235
data = {"image": binary_image}
3336
logging.info(f"image loaded!")
3437

@@ -60,8 +63,20 @@ def main(url_face: str, url_age_gender: str, image_path: str):
6063
ages = response["ages"]
6164
genders = response["genders"]
6265

66+
return genders, ages, bboxes, det_scores, landmarks, embeddings
67+
68+
69+
def annotate_image(image: Image.Image, genders: list, ages: list, bboxes: list) -> None:
70+
"""Annotate a given image. This is done in-place. Nothing is returned.
71+
72+
Args
73+
----
74+
image: Pillow image
75+
genders, ages, bboxes
76+
77+
"""
6378
logging.debug(f"annotating image ...")
64-
image = Image.open(image_path)
79+
6580
draw = ImageDraw.Draw(image)
6681
font = ImageFont.truetype("fonts/arial.ttf", 25)
6782

@@ -83,8 +98,33 @@ def main(url_face: str, url_age_gender: str, image_path: str):
8398
fill=(0, 255, 0),
8499
font=font,
85100
)
86-
image.save(image_path + ".ANNOTATED.jpg")
87-
logging.info(f"image annotated and saved at {image_path + '.ANNOTATED.jpg'}")
101+
102+
103+
def save_annotated_image(
104+
image: Image.Image,
105+
save_path: str,
106+
bboxes: list,
107+
det_scores: list,
108+
landmarks: list,
109+
embeddings: list,
110+
genders: list,
111+
ages: list,
112+
) -> None:
113+
"""Save the annotated image.
114+
115+
Args
116+
----
117+
image: Pillow image
118+
bboxes:
119+
det_scores:
120+
landmarks:
121+
embeddings:
122+
genders:
123+
ages:
124+
125+
"""
126+
image.save(save_path)
127+
logging.info(f"image annotated and saved at {save_path}")
88128

89129
to_dump = {
90130
"bboxes": bboxes,
@@ -95,19 +135,122 @@ def main(url_face: str, url_age_gender: str, image_path: str):
95135
"ages": ages,
96136
}
97137

98-
with open(image_path + ".pkl", "wb") as stream:
138+
with open(save_path + ".pkl", "wb") as stream:
99139
pickle.dump(to_dump, stream)
100-
logging.info(f"features saved at at {image_path + '.pkl'}")
140+
logging.info(f"features saved at {save_path + '.pkl'}")
141+
142+
143+
def run_image(url_face: str, url_age_gender: str, image_path: str):
144+
"""Run age-gender on the image.
145+
146+
Args
147+
----
148+
url_face: url of the face-detection-recognition server
149+
url_age_gender: url of the age-gender server.
150+
image_path
151+
152+
"""
153+
logging.debug(f"loading image ...")
154+
with open(image_path, "rb") as stream:
155+
binary_image = stream.read()
156+
157+
genders, ages, bboxes, det_scores, landmarks, embeddings = send_to_servers(
158+
binary_image, url_face, url_age_gender
159+
)
160+
161+
image = Image.open(image_path)
162+
163+
annotate_image(image, genders, ages, bboxes)
164+
165+
save_path = image_path + ".ANNOTATED.jpg"
166+
167+
save_annotated_image(
168+
image, save_path, bboxes, det_scores, landmarks, embeddings, genders, ages
169+
)
170+
171+
172+
def annotate_fps(image: Image.Image, fps: int) -> None:
173+
"""Annotate fps on a given image.
174+
175+
Args
176+
----
177+
image: Pillow image
178+
fps: frames per second
179+
180+
"""
181+
draw = ImageDraw.Draw(image)
182+
font = ImageFont.truetype("fonts/arial.ttf", 25)
183+
draw.text((0, 0), f"FPS: {fps} (Press q to exit.)", fill=(0, 0, 255), font=font)
184+
185+
186+
def run_webcam(url_face: str, url_age_gender: str, camera_id: int):
187+
188+
import time
189+
190+
import cv2
191+
192+
cap = cv2.VideoCapture(camera_id)
193+
194+
if not cap.isOpened():
195+
print("Cannot open camera")
196+
exit()
197+
198+
# fps = []
199+
while True:
200+
start_time = time.time() # start time of the loop
201+
# Capture frame-by-frame
202+
ret, image_BGR = cap.read()
203+
# if frame is read correctly ret is True
204+
if not ret:
205+
print("Can't receive frame (stream end?). Exiting ...")
206+
break
207+
# Our operations on the frame come here
208+
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
209+
# Display the resulting frame
210+
image_RGB = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2RGB)
211+
212+
image_PIL = Image.fromarray(image_RGB)
213+
binary_image = io.BytesIO()
214+
image_PIL.save(binary_image, format="JPEG")
215+
binary_image = binary_image.getvalue()
216+
217+
genders, ages, bboxes, det_scores, landmarks, embeddings = send_to_servers(
218+
binary_image, url_face, url_age_gender
219+
)
220+
221+
annotate_image(image_PIL, genders, ages, bboxes)
222+
223+
# fps.append(time)
224+
fps = int(1.0 / (time.time() - start_time))
225+
226+
annotate_fps(image_PIL, fps)
227+
228+
cv2.imshow("frame", cv2.cvtColor(np.array(image_PIL), cv2.COLOR_RGB2BGR))
229+
if cv2.waitKey(1) == ord("q"):
230+
break
231+
232+
# When everything done, release the capture
233+
cap.release()
234+
cv2.destroyAllWindows()
101235

102236

103237
if __name__ == "__main__":
104238
parser = argparse.ArgumentParser(description="Extract face, gender, and age.")
105239
parser.add_argument("--url-face", type=str, default="http://127.0.0.1:10002/")
106240
parser.add_argument("--url-age-gender", type=str, default="http://127.0.0.1:10003/")
107-
parser.add_argument("--image-path", type=str)
241+
parser.add_argument("--image-path", type=str, default=None)
242+
parser.add_argument("--camera-id", type=int, default="0", help="ffplay /dev/video0")
243+
parser.add_argument("--mode", type=str, default="image", help="image or webcam")
108244

109245
args = vars(parser.parse_args())
110246

111247
logging.info(f"arguments given to {__file__}: {args}")
112248

113-
main(**args)
249+
mode = args.pop("mode")
250+
if mode == "image":
251+
assert args["image_path"] is not None
252+
del args["camera_id"]
253+
run_image(**args)
254+
else:
255+
del args["image_path"]
256+
run_webcam(**args)

requirements-client.txt

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,12 @@
1-
charset-normalizer==2.0.4
2-
idna==3.2
3-
importlib-metadata==4.8.1
4-
jsonpickle==2.0.0
5-
numpy==1.21.2
6-
Pillow==8.3.2
7-
requests==2.26.0
8-
typing-extensions==3.10.0.2
9-
urllib3==1.26.6
10-
zipp==3.5.0
1+
certifi==2021.10.8
2+
charset-normalizer==2.0.11
3+
idna==3.3
4+
importlib-metadata==4.10.1
5+
jsonpickle==2.1.0
6+
numpy==1.21.5
7+
opencv-python==4.5.5.62
8+
Pillow==9.0.1
9+
requests==2.27.1
10+
typing_extensions==4.0.1
11+
urllib3==1.26.8
12+
zipp==3.7.0

0 commit comments

Comments
 (0)