@@ -9,7 +9,6 @@
 import uuid
 
 import cv2
-from model_api.models.open_pose import OpenPoseDecoder
 import numpy as np
 from openvino.runtime import Core
 from ovmsclient import make_grpc_client
@@ -365,8 +364,7 @@ def modelPreconfigure(self):
         if self.device == "CPU":
             self.config = {'NUM_STREAMS': self.ov_cores,
                            'INFERENCE_NUM_THREADS': str(self.ov_cores),
-                           'PERFORMANCE_HINT': 'THROUGHPUT',
-                           'AFFINITY': 'CORE'}
+                           'PERFORMANCE_HINT': 'THROUGHPUT'}
             self.core.set_property(device_name=self.device, properties=self.config)
 
         if self.plugin:
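For context on the configuration that survives this change, here is a minimal standalone sketch of applying the same CPU properties through openvino.runtime; the core count and model path are illustrative placeholders, not values from this repository.

from openvino.runtime import Core

# Minimal sketch (not project code): set the CPU properties that
# modelPreconfigure() keeps after this change; 'AFFINITY' is no longer passed.
ov_cores = 4                      # assumed core count; the class receives this as ov_cores
core = Core()
core.set_property(device_name="CPU",
                  properties={'NUM_STREAMS': str(ov_cores),
                              'INFERENCE_NUM_THREADS': str(ov_cores),
                              'PERFORMANCE_HINT': 'THROUGHPUT'})
compiled_model = core.compile_model("model.xml", "CPU")   # hypothetical model path

Models compiled after set_property() pick up the THROUGHPUT hint; dropping 'AFFINITY' leaves thread pinning to the runtime's defaults.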
@@ -770,150 +768,6 @@ def deserializeOutput(self, data):
         dec_data = json.loads(data)
         return dec_data
 
-class PoseEstimator(Detector):
-    POSE_PAIRS = ((15, 13), (13, 11), (16, 14), (14, 12), (11, 12), (5, 11),
-                  (6, 12), (5, 6), (5, 7), (6, 8), (7, 9), (8, 10), (1, 2),
-                  (0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6))
-
-    bodyPartKP = ['Nose', 'Left-Eye', 'Right-Eye', 'Left-Ear', 'Right-Ear',
-                  'Left-Shoulder', 'Right-Shoulder', 'Left-Elbow', 'Right-Elbow',
-                  'Left-Wrist', 'Right-Wrist', 'Left-Hip', 'Right-Hip',
-                  'Left-Knee', 'Right-Knee', 'Left-Ankle', 'Right-Ankle']
-
-    colors = [ (255,0,0), (255,85,0), (255,170,0), (255,255,0), (170,255,0), (85,255,0),
-               (0,255,0), (0,255,85), (0,255,170), (0,255,255), (0,170,255), (0,85,255),
-               (0,0,255), (85,0,255), (170,0,255), (255,0,255), (255,0,170)]
-
-    def __init__(self, asynchronous=False, distributed=Distributed.NONE):
-        super().__init__(asynchronous=asynchronous, distributed=distributed)
-        self.decoder = OpenPoseDecoder()
-        self.saveDict = True
-
-        return
-
-    def setParameters(self, model, device, plugin, threshold, ov_cores):
-        super().setParameters(model, device, plugin, threshold, ov_cores)
-
-        if self.distributed == Distributed.OVMS:
-            self.output_keys = list(self.model_metadata["outputs"].keys())
-            self.n, self.c, self.h, self.w = self.model_metadata["inputs"]["data"]["shape"]
-        else:
-            self.output_keys = [out.get_any_name() for out in self.model.outputs]
-            self.n, self.c, self.h, self.w = self.model.inputs[0].shape
-
-        return
-
-    def postprocess(self, result):
-        people = []
-        poses = self.processResults(result)
-
-        for pose in poses:
-            points = pose[:, :2]
-            points_scores = pose[:, 2]
-
-            hpe_bounds = [None] * 4
-            published_pose = []
-            for point, score in zip(points, points_scores):
-                if len(point) == 0 or score == 0:
-                    published_pose.append(())
-                    continue
-
-                point_x, point_y = point[0], point[1]
-                published_pose.append((point_x, point_y))
-
-                if hpe_bounds[0] is None or point_x < hpe_bounds[0]:
-                    hpe_bounds[0] = point_x
-                if hpe_bounds[2] is None or point_x > hpe_bounds[2]:
-                    hpe_bounds[2] = point_x
-                if hpe_bounds[1] is None or point_y < hpe_bounds[1]:
-                    hpe_bounds[1] = point_y
-                if hpe_bounds[3] is None or point_y > hpe_bounds[3]:
-                    hpe_bounds[3] = point_y
-
-            if hpe_bounds[0] == None:
-                continue
-
-            if self.hasKeypoints(published_pose,
-                                 ('Right-Hip', 'Right-Knee', 'Right-Ankle',
-                                  'Left-Hip', 'Left-Knee', 'Left-Ankle')) \
-                or self.hasKeypoints(published_pose,
-                                     ('Right-Shoulder', 'Right-Elbow', 'Right-Wrist',
-                                      'Left-Shoulder', 'Left-Elbow', 'Left-Wrist')):
-
-                bounds = Rectangle(origin=Point(hpe_bounds[0], hpe_bounds[1]),
-                                   opposite=Point(hpe_bounds[2], hpe_bounds[3]))
-                if bounds.width == 0 or bounds.height == 0:
-                    continue
-
-                comw = bounds.width / 3
-                comh = bounds.height / 4
-                center_of_mass = Rectangle(origin=Point(bounds.x + comw, bounds.y + comh),
-                                           size=(comw, comh))
-                person = {'id': len(people) + 1,
-                          'category': 'person',
-                          'bounding_box': bounds.asDict,
-                          'center_of_mass': center_of_mass.asDict,
-                          'pose': published_pose}
-                people.append(person)
-
-        return people
-
-    def hasKeypoints(self, pose, points):
-        for point in points:
-            idx = self.bodyPartKP.index(point)
-            if idx >= len(pose) or not len(pose[idx]):
-                return False
-        return True
-
-    def processResults(self, results):
-
-        pafs = results.data[self.output_keys[0]]
-        heatmaps = results.data[self.output_keys[1]]
-
-        pooled_heatmaps = np.array(
-            [[self.maxpool(h, kernel_size=3, stride=1, padding=1) for h in heatmaps[0]]])
-        nms_heatmaps = self.nonMaxSuppression(heatmaps, pooled_heatmaps)
-
-        image_shape = results.save
-        poses, _ = self.decoder(heatmaps, nms_heatmaps, pafs)
-
-        if self.distributed == Distributed.OVMS:
-            output_shape = self.model_metadata["outputs"][self.output_keys[0]]['shape']
-        else:
-            output_shape = self.model.get_output_shape(0)
-
-        image_width, image_height = image_shape
-        _, _, output_height, output_width = output_shape
-        x_scale, y_scale = image_width / output_width, image_height / output_height
-
-        if self.keep_aspect:
-            height_ratio = self.h / image_height
-            width_ratio = self.w / image_width
-            if height_ratio <= width_ratio:
-                x_scale = x_scale / (height_ratio / width_ratio)
-            else:
-                y_scale = y_scale / (width_ratio / height_ratio)
-
-        poses[:, :, :2] *= (x_scale, y_scale)
-        return poses
-
-    def maxpool(self, matrix, kernel_size, stride, padding):
-        matrix = np.pad(matrix, padding, mode="constant")
-        output_shape = ((matrix.shape[0] - kernel_size) // stride + 1,
-                        (matrix.shape[1] - kernel_size) // stride + 1,)
-
-        kernel_size = (kernel_size, kernel_size)
-
-        matrix_view = np.lib.stride_tricks.as_strided(matrix,
-                                                      shape=output_shape + kernel_size,
-                                                      strides=(stride * matrix.strides[0], stride * matrix.strides[1]) + matrix.strides)
-        matrix_view = matrix_view.reshape(-1, *kernel_size)
-
-        return matrix_view.max(axis=(1, 2)).reshape(output_shape)
-
-    def nonMaxSuppression(self, result, pooled_result):
-        return result * (result == pooled_result)
-
 class REIDDetector(Detector):
 
     def postprocess(self, result):
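The deleted PoseEstimator turned the network's keypoint heatmaps into isolated peaks by max-pooling each map and keeping only the pixels equal to their pooled value, then handed the result to OpenPoseDecoder. A minimal NumPy sketch of that peak suppression follows; the random array standing in for a real heatmap and the 0.1 threshold are assumptions, not values from this code.

import numpy as np

def maxpool2d(matrix, kernel_size=3, stride=1, padding=1):
    # Sliding-window maximum over a 2-D array, same approach as the removed maxpool().
    matrix = np.pad(matrix, padding, mode="constant")
    out_shape = ((matrix.shape[0] - kernel_size) // stride + 1,
                 (matrix.shape[1] - kernel_size) // stride + 1)
    windows = np.lib.stride_tricks.as_strided(
        matrix,
        shape=out_shape + (kernel_size, kernel_size),
        strides=(stride * matrix.strides[0], stride * matrix.strides[1]) + matrix.strides)
    return windows.max(axis=(2, 3))

heatmap = np.random.rand(32, 32).astype(np.float32)   # stand-in for one model heatmap
pooled = maxpool2d(heatmap)
nms_heatmap = heatmap * (heatmap == pooled)            # keep only local maxima
peaks = np.argwhere(nms_heatmap > 0.1)                 # (row, col) keypoint candidates

With kernel 3, stride 1 and padding 1 (the values the removed code passed to maxpool), the pooled map has the same shape as the input, so every surviving non-zero pixel is the maximum of its 3x3 neighbourhood.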