From da27e7bc31825a16560ab1ab6fa0187e5f491dc5 Mon Sep 17 00:00:00 2001
From: fengyuentau
Date: Sat, 13 Jan 2024 16:14:58 +0800
Subject: [PATCH 1/2] initial commit

---
 models/face_recognition_sface/README.md |  2 +-
 models/face_recognition_sface/demo.py   | 98 ++++++++++++++++++++++---
 models/face_recognition_sface/sface.py  |  4 +-
 3 files changed, 89 insertions(+), 15 deletions(-)

diff --git a/models/face_recognition_sface/README.md b/models/face_recognition_sface/README.md
index 3baf7be1..e931d41d 100644
--- a/models/face_recognition_sface/README.md
+++ b/models/face_recognition_sface/README.md
@@ -26,7 +26,7 @@ Run the following command to try the demo:
 ```shell
 # recognize on images
-python demo.py --input1 /path/to/image1 --input2 /path/to/image2
+python demo.py --target /path/to/image1 --query /path/to/image2
 
 # get help regarding various parameters
 python demo.py --help
diff --git a/models/face_recognition_sface/demo.py b/models/face_recognition_sface/demo.py
index 5bac0157..248b9df1 100644
--- a/models/face_recognition_sface/demo.py
+++ b/models/face_recognition_sface/demo.py
@@ -30,10 +30,10 @@
 parser = argparse.ArgumentParser(
     description="SFace: Sigmoid-Constrained Hypersphere Loss for Robust Face Recognition (https://ieeexplore.ieee.org/document/9318547)")
-parser.add_argument('--input1', '-i1', type=str,
-                    help='Usage: Set path to the input image 1 (original face).')
-parser.add_argument('--input2', '-i2', type=str,
-                    help='Usage: Set path to the input image 2 (comparison face).')
+parser.add_argument('--target', '-t', type=str,
+                    help='Usage: Set path to the input image 1 (target face).')
+parser.add_argument('--query', '-q', type=str,
+                    help='Usage: Set path to the input image 2 (query).')
 parser.add_argument('--model', '-m', type=str, default='face_recognition_sface_2021dec.onnx',
                     help='Usage: Set model path, defaults to face_recognition_sface_2021dec.onnx.')
 parser.add_argument('--backend_target', '-bt', type=int, default=0,
                     help='''Choose one of the backend-target pair to run this demo:
@@ -46,8 +46,64 @@
 '''.format(*[x for x in range(len(backend_target_pairs))]))
 parser.add_argument('--dis_type', type=int, choices=[0, 1], default=0,
                     help='Usage: Distance type. \'0\': cosine, \'1\': norm_l1. Defaults to \'0\'')
+parser.add_argument('--save', '-s', action='store_true',
+                    help='Usage: Specify to save a file with results (i.e. bounding box, confidence level). Invalid in case of camera input.')
+parser.add_argument('--vis', '-v', action='store_true',
+                    help='Usage: Specify to open a new window to show results. Invalid in case of camera input.')
 args = parser.parse_args()
 
+def visualize(img1, faces1, img2, faces2, matches, scores, target_size=[512, 512]): # target_size: (h, w)
+    out1 = img1.copy()
+    out2 = img2.copy()
+    matched_box_color = (0, 255, 0)    # BGR
+    mismatched_box_color = (0, 0, 255) # BGR
+
+    # Resize to the target size (512x512 by default), keeping the aspect ratio
+    padded_out1 = np.zeros((target_size[0], target_size[1], 3)).astype(np.uint8)
+    h1, w1, _ = out1.shape
+    ratio1 = min(target_size[0] / out1.shape[0], target_size[1] / out1.shape[1])
+    new_h1 = int(h1 * ratio1)
+    new_w1 = int(w1 * ratio1)
+    resized_out1 = cv.resize(out1, (new_w1, new_h1), interpolation=cv.INTER_LINEAR).astype(np.float32)
+    top = max(0, target_size[0] - new_h1) // 2
+    bottom = top + new_h1
+    left = max(0, target_size[1] - new_w1) // 2
+    right = left + new_w1
+    padded_out1[top : bottom, left : right] = resized_out1
+
+    # Draw bbox
+    bbox1 = faces1[0][:4] * ratio1
+    x, y, w, h = bbox1.astype(np.int32)
+    cv.rectangle(padded_out1, (x + left, y + top), (x + left + w, y + top + h), matched_box_color, 2)
+
+    # Resize to the target size (512x512 by default), keeping the aspect ratio
+    padded_out2 = np.zeros((target_size[0], target_size[1], 3)).astype(np.uint8)
+    h2, w2, _ = out2.shape
+    ratio2 = min(target_size[0] / out2.shape[0], target_size[1] / out2.shape[1])
+    new_h2 = int(h2 * ratio2)
+    new_w2 = int(w2 * ratio2)
+    resized_out2 = cv.resize(out2, (new_w2, new_h2), interpolation=cv.INTER_LINEAR).astype(np.float32)
+    top = max(0, target_size[0] - new_h2) // 2
+    bottom = top + new_h2
+    left = max(0, target_size[1] - new_w2) // 2
+    right = left + new_w2
+    padded_out2[top : bottom, left : right] = resized_out2
+
+    # Draw bboxes and scores for all query faces
+    assert faces2.shape[0] == len(matches), "number of faces2 needs to match number of matches"
+    assert len(matches) == len(scores), "number of matches needs to match number of scores"
+    for index, match in enumerate(matches):
+        bbox2 = faces2[index][:4] * ratio2
+        x, y, w, h = bbox2.astype(np.int32)
+        box_color = matched_box_color if match else mismatched_box_color
+        cv.rectangle(padded_out2, (x + left, y + top), (x + left + w, y + top + h), box_color, 2)
+
+        score = scores[index]
+        text_color = matched_box_color if match else mismatched_box_color
+        cv.putText(padded_out2, "{:.2f}".format(score), (x + left, y + top - 5), cv.FONT_HERSHEY_DUPLEX, 0.4, text_color)
+
+    return np.concatenate([padded_out1, padded_out2], axis=1)
+
 if __name__ == '__main__':
     backend_id = backend_target_pairs[args.backend_target][0]
     target_id = backend_target_pairs[args.backend_target][1]
@@ -65,17 +121,35 @@
                        backendId=backend_id,
                        targetId=target_id)
 
-    img1 = cv.imread(args.input1)
-    img2 = cv.imread(args.input2)
+    img1 = cv.imread(args.target)
+    img2 = cv.imread(args.query)
 
     # Detect faces
     detector.setInputSize([img1.shape[1], img1.shape[0]])
-    face1 = detector.infer(img1)
-    assert face1.shape[0] > 0, 'Cannot find a face in {}'.format(args.input1)
+    faces1 = detector.infer(img1)
+    assert faces1.shape[0] > 0, 'Cannot find a face in {}'.format(args.target)
     detector.setInputSize([img2.shape[1], img2.shape[0]])
-    face2 = detector.infer(img2)
-    assert face2.shape[0] > 0, 'Cannot find a face in {}'.format(args.input2)
+    faces2 = detector.infer(img2)
+    assert faces2.shape[0] > 0, 'Cannot find a face in {}'.format(args.query)
 
     # Match
-    result = recognizer.match(img1, face1[0][:-1], img2, face2[0][:-1])
-    print('Result: {}.'.format('same identity' if result else 'different identities'))
+    scores = []
+    matches = []
+    for face in faces2:
+        result = recognizer.match(img1, faces1[0][:-1], img2, face[:-1])
+        scores.append(result[0])
+        matches.append(result[1])
+
+    # Draw results
+    image = visualize(img1, faces1, img2, faces2, matches, scores)
+
+    # Save results if --save is specified
+    if args.save:
+        print('Results saved to result.jpg\n')
+        cv.imwrite('result.jpg', image)
+
+    # Visualize results in a new window if --vis is specified
+    if args.vis:
+        cv.namedWindow("SFace Demo", cv.WINDOW_AUTOSIZE)
+        cv.imshow("SFace Demo", image)
+        cv.waitKey(0)
diff --git a/models/face_recognition_sface/sface.py b/models/face_recognition_sface/sface.py
index 91ca7361..cb467071 100644
--- a/models/face_recognition_sface/sface.py
+++ b/models/face_recognition_sface/sface.py
@@ -57,7 +57,7 @@ def match(self, image1, face1, image2, face2):
 
         if self._disType == 0: # COSINE
             cosine_score = self._model.match(feature1, feature2, self._disType)
-            return 1 if cosine_score >= self._threshold_cosine else 0
+            return cosine_score, 1 if cosine_score >= self._threshold_cosine else 0
         else: # NORM_L2
             norml2_distance = self._model.match(feature1, feature2, self._disType)
-            return 1 if norml2_distance <= self._threshold_norml2 else 0
+            return norml2_distance, 1 if norml2_distance <= self._threshold_norml2 else 0
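[Editor's note: illustration, not part of the patch series] PATCH 1/2 changes `SFace.match` from returning a bare 0/1 decision to returning a `(raw score, 0/1 decision)` tuple, which is a breaking change for any external caller. A minimal sketch of the required adaptation follows; the `YuNet` import path and both model filenames are assumptions based on this repo's layout, not something the patch itself establishes.

```python
# Editor's sketch, not part of the patch: adapting a caller to the new
# (score, decision) return value of SFace.match().
import sys

import cv2 as cv

sys.path.append('../face_detection_yunet')  # assumed repo layout, mirroring demo.py
from yunet import YuNet                     # assumed detector wrapper used by demo.py
from sface import SFace

# Model filenames are assumed to be the ones shipped in this repo.
detector = YuNet(modelPath='face_detection_yunet_2023mar.onnx')
recognizer = SFace(modelPath='face_recognition_sface_2021dec.onnx', disType=0)  # 0: cosine

img1 = cv.imread('/path/to/image1')  # placeholder paths
img2 = cv.imread('/path/to/image2')

# infer() returns one row per detected face; the last column is the detection
# score, which match() does not take, hence the [:-1] slices below.
detector.setInputSize([img1.shape[1], img1.shape[0]])
faces1 = detector.infer(img1)
detector.setInputSize([img2.shape[1], img2.shape[0]])
faces2 = detector.infer(img2)

# Before PATCH 1/2: matched = recognizer.match(img1, faces1[0][:-1], img2, faces2[0][:-1])
score, matched = recognizer.match(img1, faces1[0][:-1], img2, faces2[0][:-1])
print('cosine score: {:.3f}, same identity: {}'.format(score, bool(matched)))
```

Only the unpacking changes; the decision rule (cosine score at or above the threshold, or L2 distance at or below it) is the same as before.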
From 902a849d9ce0f91416ad79a6fca6e36cad0a3f5b Mon Sep 17 00:00:00 2001
From: fengyuentau
Date: Sat, 13 Jan 2024 16:47:16 +0800
Subject: [PATCH 2/2] add example output

---
 README.md                                              | 4 ++++
 models/face_recognition_sface/README.md                | 6 ++++++
 models/face_recognition_sface/example_outputs/demo.jpg | 3 +++
 3 files changed, 13 insertions(+)
 create mode 100644 models/face_recognition_sface/example_outputs/demo.jpg

diff --git a/README.md b/README.md
index e2af9a83..ce5a381e 100644
--- a/README.md
+++ b/README.md
@@ -61,6 +61,10 @@ Some examples are listed below. You can find more in the directory of each model
 
 ![largest selfie](./models/face_detection_yunet/example_outputs/largest_selfie.jpg)
 
+### Face Recognition with [SFace](./models/face_recognition_sface/)
+
+![sface demo](./models/face_recognition_sface/example_outputs/demo.jpg)
+
 ### Facial Expression Recognition with [Progressive Teacher](./models/facial_expression_recognition/)
 
 ![fer demo](./models/facial_expression_recognition/example_outputs/selfie.jpg)
diff --git a/models/face_recognition_sface/README.md b/models/face_recognition_sface/README.md
index e931d41d..6fb9c5c1 100644
--- a/models/face_recognition_sface/README.md
+++ b/models/face_recognition_sface/README.md
@@ -32,6 +32,12 @@ python demo.py --target /path/to/image1 --query /path/to/image2
 python demo.py --help
 ```
 
+### Example outputs
+
+![sface demo](./example_outputs/demo.jpg)
+
+Note: The left half of the image shows the target identity and the right half shows the query. Green boxes mark query faces judged to be the same identity as the target; red boxes mark different identities.
+
 ## License
 
 All files in this directory are licensed under [Apache 2.0 License](./LICENSE).
diff --git a/models/face_recognition_sface/example_outputs/demo.jpg b/models/face_recognition_sface/example_outputs/demo.jpg
new file mode 100644
index 00000000..2d49bbc6
--- /dev/null
+++ b/models/face_recognition_sface/example_outputs/demo.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f879881a598fea6fec74e047e6a1d00e36d81de63bf0ed392b628e6ab6c2fc4
+size 156282
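[Editor's note: usage sketch, not part of the patch series] With both patches applied, the demo can be exercised as shown below. Image paths are placeholders; every flag used here is defined in demo.py above.

```shell
# compare the query image against the target identity, then save result.jpg
# and open a window with the side-by-side visualization
python demo.py --target /path/to/image1 --query /path/to/image2 --save --vis

# same comparison, scored with the L2 distance (dis_type 1) instead of cosine
python demo.py --target /path/to/image1 --query /path/to/image2 --dis_type 1
```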