Commit 4e65421

#14 StudentCapture & TrainClassifier added to capture & save student encodings for recognition
1 parent: 523c135

16 files changed (+425, -185 lines)

backend/src/app.py

Lines changed: 7 additions & 1 deletion
@@ -2,11 +2,13 @@
 from flask_restful import Api
 from flask_jwt_extended import JWTManager
 from flask_cors import CORS
+from flask_uploads import configure_uploads, patch_request_class
 from marshmallow import ValidationError
 
+from src.libs.image_helper import IMAGE_SET
 from src.resources.dashboard import Dashboard
 from src.resources.teacher import Teacher, TeacherRegister, TeacherLogin
-from src.resources.student import StudentList, StudentAdd, StudentDelete
+from src.resources.student import StudentList, StudentAdd, StudentCapture, StudentDelete, TrainClassifier
 from src.resources.attendance import AttendanceList
 from src.resources.video_feed import (
     VideoFeedList, VideoFeedAdd, VideoFeed, VideoFeedPreview, VideoFeedStop, VideoFeedStart, VideoFeedDelete
@@ -16,6 +18,8 @@
 app = Flask(__name__)
 app.config.from_object("src.settings.FlaskAppConfiguration")
 api = Api(app)
+patch_request_class(app, 2 * 1024 * 1024) # 2 MB max size upload
+configure_uploads(app, IMAGE_SET)
 jwt = JWTManager(app)
 cors = CORS(app)
 
@@ -45,7 +49,9 @@ def handle_marshmallow_validation(err):
 # /students
 api.add_resource(StudentList, "/students")
 api.add_resource(StudentAdd, "/students/add")
+api.add_resource(StudentCapture, "/students/capture/<int:student_id>")
 api.add_resource(StudentDelete, "/students/delete/<int:student_id>")
+api.add_resource(TrainClassifier, "/students/train")
 
 # /attendance
 api.add_resource(AttendanceList, "/attendance")
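
The new routes imply a capture-then-train workflow: upload face images for a student, then rebuild the encodings used for recognition. The snippet below is only an illustration of how a client might drive these endpoints; the multipart field name "image", the use of POST on both routes, and the JWT requirement are assumptions, not confirmed by this diff.

import requests

BASE_URL = "http://localhost:5000"  # assumed local dev address
TOKEN = "<JWT obtained from the login endpoint>"  # placeholder; auth details are assumed
headers = {"Authorization": f"Bearer {TOKEN}"}

# Upload a face image for student 1 (field name "image" is an assumption).
with open("student_1.jpg", "rb") as image_file:
    response = requests.post(
        f"{BASE_URL}/students/capture/1",
        files={"image": image_file},
        headers=headers,
    )
    print(response.status_code, response.text)

# Rebuild the classifier/encodings once new captures are saved.
response = requests.post(f"{BASE_URL}/students/train", headers=headers)
print(response.status_code, response.text)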

backend/src/libs/cli_utils.py

Lines changed: 0 additions & 130 deletions
@@ -153,136 +153,6 @@ def train_classifier(cls):
         f.write(pickle.dumps(data))
         f.close()
 
-    # def recognize_n_attendance_dnn(self):
-    #     print("[INFO] loading encodings...")
-    #     data = pickle.loads(open(ENCODINGS_FILE, "rb").read())
-    #     # print(len(data['encodings']) == len(data['ids']))
-    #
-    #     print("[INFO] starting video stream...")
-    #     # store input video stream in cap variable
-    #     cap = cv2.VideoCapture(self.input_video)
-    #     # load our serialized model from disk
-    #     net = cv2.dnn.readNetFromCaffe(prototxt=PROTOTXT_PATH, caffeModel=CAFFEMODEL_PATH)
-    #
-    #     # find if today's attendance exists in the database
-    #     attendance = AttendanceModel.find_by_date(date=dt.today())
-    #     # if not
-    #     if attendance is None:
-    #         # create new instance for today's attendance
-    #         attendance = AttendanceModel()
-    #
-    #     # loop over the frames from the video stream
-    #     while True:
-    #         # grab the frame from the video stream
-    #         ret, img = cap.read()
-    #
-    #         # convert the input frame from BGR to RGB then resize it to have
-    #         # a width of 750px (to speedup processing)
-    #         rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-    #         # rgb = imutils.resize(img, width=750)
-    #         r = img.shape[1] / float(rgb.shape[1])
-    #
-    #         # grab the image frame dimensions and convert it to a blob
-    #         (h, w) = img.shape[:2]
-    #         blob = cv2.dnn.blobFromImage(
-    #             cv2.resize(img, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0)
-    #         )
-    #
-    #         # pass the blob through the network and obtain the detections and
-    #         # predictions
-    #         net.setInput(blob)
-    #         detections = net.forward()
-    #
-    #         # loop over the detections
-    #         for i in range(0, detections.shape[2]):
-    #             # extract the confidence (i.e., probability) associated with the
-    #             # prediction
-    #             confidence = detections[0, 0, i, 2]
-    #
-    #             # filter out weak detections by ensuring the `confidence` is
-    #             # greater than the minimum confidence
-    #             if confidence < 0.5:
-    #                 continue
-    #
-    #             # compute the (x, y)-coordinates of the bounding box for the
-    #             # object
-    #             box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
-    #             (startX, startY, endX, endY) = box.astype("int")
-    #
-    #             # detect the (x, y)-coordinates of the bounding boxes
-    #             # corresponding to each face in the input frame, then compute
-    #             # the facial embeddings for each face
-    #             boxes = [(startY, endX, endY, startX)]
-    #
-    #             encodings = face_recognition.face_encodings(rgb, boxes)
-    #             names = []
-    #
-    #             # loop over the facial embeddings
-    #             for encoding in encodings:
-    #                 # attempt to match each face in the input image to our known encodings
-    #                 matches = face_recognition.compare_faces(data["encodings"], encoding, DLIB_TOLERANCE)
-    #                 # name to be displayed on video
-    #                 display_name = "Unknown"
-    #
-    #                 # check to see if we have found a match
-    #                 if True in matches:
-    #                     # find the indexes of all matched faces then initialize a
-    #                     # dictionary to count the total number of times each face
-    #                     # was matched
-    #                     matched_indexes = [i for (i, b) in enumerate(matches) if b]
-    #                     counts = {}
-    #
-    #                     # loop over the matched indexes and maintain a count for
-    #                     # each recognized face
-    #                     for matched_index in matched_indexes:
-    #                         _id = data["ids"][matched_index]
-    #                         counts[_id] = counts.get(_id, 0) + 1
-    #
-    #                     # determine the recognized face with the largest number
-    #                     # of votes (note: in the event of an unlikely tie Python
-    #                     # will select first entry in the dictionary)
-    #                     _id = max(counts, key=counts.get)
-    #                     if _id:
-    #                         # find matched student in the database by id
-    #                         student = StudentModel.find_by_id(_id)
-    #                         # if student's attendance is not marked
-    #                         if not attendance.is_marked(student):
-    #                             # then mark student's attendance
-    #                             attendance.students.append(student)
-    #                             # commit changes to database
-    #                             attendance.save_to_db()
-    #                         # update displayed name to student's name
-    #                         display_name = student.name
-    #                 # append the name to be displayed in names list
-    #                 names.append(display_name)
-    #             # loop over the recognized faces
-    #             for ((top, right, bottom, left), display_name) in zip(boxes, names):
-    #                 if display_name == "Unknown":
-    #                     continue
-    #                 # rescale the face coordinates
-    #                 top = int(top * r)
-    #                 right = int(right * r)
-    #                 bottom = int(bottom * r)
-    #                 left = int(left * r)
-    #                 top_left = (left, top)
-    #                 bottom_right = (right, bottom)
-    #
-    #                 # draw the predicted face name on the image
-    #                 cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), 2)
-    #                 y = top - 15 if top - 15 > 15 else top + 15
-    #                 cv2.putText(img, display_name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
-    #
-    #         # display the output frames to the screen
-    #         cv2.imshow(f"Recognizing Faces - {self.app_title}", img)
-    #         k = cv2.waitKey(100) & 0xff # Press 'ESC' for exiting from the loop
-    #         if k == 27:
-    #             break
-    #
-    #     # do a bit of cleanup
-    #     cap.release()
-    #     cv2.destroyAllWindows()
-    #     print("Attendance Successful!")
-
     def recognize_n_attendance(self):
         print("[INFO] loading encodings...")
         data = pickle.loads(open(ENCODINGS_FILE, "rb").read())
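
For context, the surviving recognize_n_attendance path (like the removed DNN variant) loads ENCODINGS_FILE with pickle and matches faces against data["encodings"] and data["ids"]. The following is a minimal standalone sketch of that matching step, assuming the pickle holds those two parallel lists; the file names and the tolerance value are placeholders, not the project's settings.

import pickle

import face_recognition

# Assumed layout, mirroring the data["encodings"] / data["ids"] usage above:
# {"encodings": [<128-d face encodings>], "ids": [<student ids>]}
with open("encodings.pickle", "rb") as f:  # placeholder for ENCODINGS_FILE
    data = pickle.load(f)

image = face_recognition.load_image_file("unknown.jpg")  # placeholder test image
for encoding in face_recognition.face_encodings(image):
    # tolerance=0.6 is the library default; the project uses its own DLIB_TOLERANCE setting
    matches = face_recognition.compare_faces(data["encodings"], encoding, tolerance=0.6)
    matched_ids = [data["ids"][i] for i, hit in enumerate(matches) if hit]
    print("matched student ids:", matched_ids)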

backend/src/libs/image_helper.py

Lines changed: 74 additions & 0 deletions
@@ -0,0 +1,74 @@
+import os
+import re
+from typing import Union
+from werkzeug.datastructures import FileStorage
+
+from flask_uploads import UploadSet, IMAGES
+
+IMAGE_SET = UploadSet("images", IMAGES) # set name and allowed extensions
+
+
+def save_image(image: FileStorage, folder: str = None, name: str = None) -> str:
+    return IMAGE_SET.save(image, folder, name)
+
+
+def get_path(filename: str = None, folder: str = None) -> str:
+    return IMAGE_SET.path(filename, folder)
+
+
+def find_image_any_format(filename: str, folder: str) -> Union[str, None]:
+    """
+    Given a format-less filename, try to find the file by appending each of the allowed formats to the given
+    filename and check if the file exists
+    :param filename: formatless filename
+    :param folder: the relative folder in which to search
+    :return: the path of the image if exists, otherwise None
+    """
+    for _format in IMAGES: # look for existing avatar and delete it
+        avatar = f"{filename}.{_format}"
+        avatar_path = IMAGE_SET.path(filename=avatar, folder=folder)
+        if os.path.isfile(avatar_path):
+            return avatar_path
+    return None
+
+
+def _retrieve_filename(file: Union[str, FileStorage]) -> str:
+    """
+    Make our filename related functions generic, able to deal with FileStorage object as well as filename str.
+    """
+    if isinstance(file, FileStorage):
+        return file.filename
+    return file
+
+
+def is_filename_safe(file: Union[str, FileStorage]) -> bool:
+    """
+    Check if a filename is secure according to our definition
+    - starts with a-z A-Z 0-9 at least one time
+    - only contains a-z A-Z 0-9 and _().-
+    - followed by a dot (.) and a allowed_format at the end
+    """
+    filename = _retrieve_filename(file)
+
+    allowed_format = "|".join(IMAGES)
+    # format IMAGES into regex, eg: ('jpeg','png') --> 'jpeg|png'
+    regex = f"^[a-zA-Z0-9][a-zA-Z0-9_()-\.]*\.({allowed_format})$"
+    return re.match(regex, filename) is not None
+
+
+def get_basename(file: Union[str, FileStorage]) -> str:
+    """
+    Return file's basename, for example
+    get_basename('some/folder/image.jpg') returns 'image.jpg'
+    """
+    filename = _retrieve_filename(file)
+    return os.path.split(filename)[1]
+
+
+def get_extension(file: Union[str, FileStorage]) -> str:
+    """
+    Return file's extension, for example
+    get_extension('image.jpg') returns '.jpg'
+    """
+    filename = _retrieve_filename(file)
+    return os.path.splitext(filename)[1]
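
A quick illustration of the pure helpers above, run from the backend package root (save_image and get_path need a Flask app with configure_uploads applied, so they are left out of this standalone sketch; the sample filenames are arbitrary):

from src.libs.image_helper import get_basename, get_extension, is_filename_safe

print(is_filename_safe("profile_1.jpg"))      # True: starts with an alphanumeric, allowed extension
print(is_filename_safe("../../etc/passwd"))   # False: traversal-style names are rejected
print(get_basename("some/folder/image.jpg"))  # image.jpg
print(get_extension("image.jpg"))             # .jpg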

backend/src/libs/web_utils.py

Lines changed: 1 addition & 36 deletions
@@ -8,53 +8,18 @@
 import face_recognition
 
 from src.settings import (
-    VIDEO_SOURCE,
-    PROTOTXT_PATH, CAFFEMODEL_PATH,
     DLIB_MODEL, DLIB_TOLERANCE,
     ENCODINGS_FILE
 )
 from src.libs.base_camera import BaseCamera
-from src.models import StudentModel, AttendanceModel, VideoFeedModel
-
-
-class DetectionCamera(BaseCamera):
-    video_source = 0
-
-    def __init__(self):
-        if VIDEO_SOURCE:
-            DetectionCamera.set_video_source(VIDEO_SOURCE)
-        super(DetectionCamera, self).__init__()
-
-    @classmethod
-    def set_video_source(cls, source):
-        cls.video_source = source
-
-    @classmethod
-    def frames(cls):
-        camera = cv2.VideoCapture(cls.video_source)
-        if not camera.isOpened():
-            raise RuntimeError('Could not start camera.')
-
-        while True:
-            # read current frame
-            _, img = camera.read()
-
-            # TODO: logic of face detection
-
-            # encode as a jpeg image and return it
-            yield cv2.imencode('.jpg', img)[1].tobytes()
+from src.models import StudentModel, AttendanceModel
 
 
 class RecognitionCamera(BaseCamera):
     video_source = 0
     # this class variable will help to process every other frame of video to save time
     process_this_frame = True
 
-    # def __init__(self, unique_id=None):
-    #     if VIDEO_SOURCE:
-    #         RecognitionCamera.set_video_source(VIDEO_SOURCE)
-    #     super(RecognitionCamera, self).__init__(unique_id)
-
     @classmethod
     def set_video_source(cls, source):
         cls.video_source = source
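
RecognitionCamera.frames() itself is not part of this hunk, so the following is only a minimal sketch of the every-other-frame pattern that the process_this_frame comment describes, reusing the capture-and-encode steps from the removed DetectionCamera; the recognition step is left as a placeholder.

import cv2

def frames(video_source=0):
    camera = cv2.VideoCapture(video_source)
    if not camera.isOpened():
        raise RuntimeError("Could not start camera.")

    process_this_frame = True
    while True:
        ret, img = camera.read()
        if not ret:
            break

        if process_this_frame:
            pass  # placeholder: run face detection/recognition on this frame only
        # flip the flag so the expensive step runs on every other frame
        process_this_frame = not process_this_frame

        # encode the frame as JPEG and yield it, as the removed DetectionCamera did
        yield cv2.imencode(".jpg", img)[1].tobytes()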
