-
Notifications
You must be signed in to change notification settings - Fork 28
Expand file tree
/
Copy pathkeypoints_from_video.py
More file actions
73 lines (58 loc) · 1.65 KB
/
keypoints_from_video.py
File metadata and controls
73 lines (58 loc) · 1.65 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import tensorflow as tf
import cv2
import time
import math
import numpy
import numpy as np
import posenet
from pose import Pose
from score import Score
import pickle
import argparse
# USAGE : python3 keypoints_from_video.py --activity "punch - side" --video "test.mp4"
# Command-line interface: the activity label, the source video, and (optionally)
# the pickle file the keypoint lookup table is dumped to.
ap = argparse.ArgumentParser()
ap.add_argument("-a", "--activity", required=True,
	help="activity to be recorded")  # BUG FIX: was "recorder" (typo)
ap.add_argument("-v", "--video", required=True,
	help="video file from which keypoints are to be extracted")
ap.add_argument("-l", "--lookup", default="lookup_new.pickle",
	help="The pickle file to dump the lookup table")
# Parsed at import time; `main()` below reads this module-level dict.
args = vars(ap.parse_args())
def main():
	"""Extract 17 (x, y) pose keypoints from every frame of the input video
	and pickle them as a lookup table keyed by the activity name.

	Reads the module-level ``args`` dict for the video path, activity label,
	and output pickle path. Shows the pose-only ("black") visualization per
	frame while processing. Writes ``{activity: ndarray of shape (F, 17, 2)}``
	to the lookup pickle, where F is the number of frames read.
	"""
	pose_estimator = Pose()
	frames_keypoints = []  # one (17, 2) array per successfully read frame
	lookup = {}
	with tf.Session() as sess:
		model_cfg, model_outputs = posenet.load_model(101, sess)
		cap = cv2.VideoCapture(args["video"])
		if not cap.isOpened():
			print("error in opening video")
		while cap.isOpened():
			ret_val, image = cap.read()
			if not ret_val:
				# End of stream (or read failure) — stop processing.
				break
			# Fixed working resolution expected by the pose pipeline
			# (presumably what the model was tuned for — TODO confirm).
			image = cv2.resize(image, (372, 495))
			input_points, input_black_image = pose_estimator.getpoints_vis(
				image, sess, model_cfg, model_outputs)
			# Keep only the first 34 values: 17 keypoints * (x, y).
			input_points = input_points[0:34]
			print(input_points)
			input_new_coords = pose_estimator.roi(input_points)[0:34]
			input_new_coords = np.asarray(input_new_coords).reshape(17, 2)
			frames_keypoints.append(input_new_coords)
			cv2.imshow("black", input_black_image)
			cv2.waitKey(1)
		cap.release()
	keypoints = np.array(frames_keypoints)
	# BUG FIX: original said `cv2.destroyAllWindows` without parentheses,
	# which never actually called the function.
	cv2.destroyAllWindows()
	print(keypoints)
	print(keypoints.shape)
	print("Lookup Table Created")
	lookup[args["activity"]] = keypoints
	# BUG FIX: open via context manager — the original handle was never
	# closed, risking a truncated/unflushed pickle on some platforms.
	with open(args["lookup"], 'wb') as f:
		pickle.dump(lookup, f)
# Standard script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
	main()