This repository was archived by the owner on Aug 10, 2022. It is now read-only.

Commit e156c2f

Merge pull request #10 from jaredb1011/dev
added optical flow scripts
2 parents e8bda77 + cb88b5d commit e156c2f

2 files changed: 269 additions, 0 deletions
Lines changed: 98 additions & 0 deletions
@@ -0,0 +1,98 @@
"""Adapted from the OpenCV optical flow documentation code: https://docs.opencv.org/4.5.3/d4/dee/tutorial_optical_flow.html"""

""" ABOUT-----------------------------------------------------------------------
Farneback Optical Flow calculates the optical flow (motion) of every pixel in a video clip.
Right now, in the result visualization, the intensity of a pixel's motion changes both its color and its brightness.
Brighter pixels have more motion.
The output visualization is stored in the same location as the input video, with the name <input_vid_filename>_FB_FLOW.mp4

The idea is that the data about how certain pixels/features move across the screen could perhaps be used to figure out how the player camera / aim was changing.
"""

import numpy as np
import cv2 as cv

# PARAMETERS--------------------------------

# path to input video file
vidpath = r""

# do you want to save the output video?
savevid = True

# fps of input/output video
fps = 30

# farneback parameters
pyr_scale = 0.5 # default 0.5
levels = 5 # default 3
winsize = 15 # default 15
iterations = 3 # default 3
poly_n = 2 # default 5
poly_sigma = 1.2 # default 1.2
flags = cv.OPTFLOW_FARNEBACK_GAUSSIAN

# SETUP ------------------------------------

# load video into memory
cap = cv.VideoCapture(vidpath)

# read first frame
_, old_frame = cap.read()
old_frame_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)

# create black result image
hsv_img = np.zeros_like(old_frame)
hsv_img[...,1] = 255

# if saving video
if savevid:
    # path to save output video
    savepath = vidpath.split('.')[0] + '_FB_FLOW' + '.mp4'

    # get shape of video frames
    height, width, channels = old_frame.shape

    # setup videowriter object
    fourcc = cv.VideoWriter_fourcc(*'mp4v')
    videoOut = cv.VideoWriter(savepath, fourcc, fps, (width, height))

# PROCESS VIDEO ---------------------------
while True:
    # get frame; stop when the video is over
    frame_ok, new_frame = cap.read()
    if not frame_ok:
        break

    # convert to grayscale
    new_frame_gray = cv.cvtColor(new_frame, cv.COLOR_BGR2GRAY)

    # do Farneback optical flow
    flow = cv.calcOpticalFlowFarneback(old_frame_gray, new_frame_gray, None, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)

    # convert the flow vectors to polar coordinates (magnitude, angle)
    mag, ang = cv.cartToPolar(flow[...,0], flow[...,1])

    # draw onto the result image - color determined by direction, brightness by magnitude of motion
    #hsv_img[...,0] = ang*180/np.pi/2
    #hsv_img[...,2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)

    # color and brightness by magnitude
    hsv_img[...,0] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)
    hsv_img[...,1] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)
    hsv_img[...,2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)

    bgr_img = cv.cvtColor(hsv_img, cv.COLOR_HSV2BGR)

    # show the image and break out if ESC pressed
    cv.imshow('Farneback Optical Flow', bgr_img)
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break

    # write frames to new output video
    if savevid:
        videoOut.write(bgr_img)

    # set old frame to new
    old_frame_gray = new_frame_gray

# cleanup
cap.release()
if savevid:
    videoOut.release()
cv.destroyAllWindows()
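
The ABOUT note suggests that this per-pixel motion data could be used to work out how the player camera / aim was changing, but the script above only visualizes the flow. Below is a minimal sketch of one way that idea could be taken further; it is not part of this commit, and the helper name estimate_camera_motion and the median aggregation are illustrative assumptions.

import numpy as np
import cv2 as cv

def estimate_camera_motion(prev_gray, next_gray):
    """Rough (dx, dy) camera-motion estimate from dense Farneback flow.

    Assumes most of the frame is background, so the median flow vector
    approximates global camera motion rather than object motion.
    Pyramid/window parameters follow the OpenCV tutorial defaults.
    """
    flow = cv.calcOpticalFlowFarneback(prev_gray, next_gray, None,
                                       0.5, 5, 15, 3, 5, 1.2,
                                       cv.OPTFLOW_FARNEBACK_GAUSSIAN)
    dx = float(np.median(flow[..., 0]))  # median horizontal motion (pixels/frame)
    dy = float(np.median(flow[..., 1]))  # median vertical motion (pixels/frame)
    return dx, dy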
Lines changed: 171 additions & 0 deletions
@@ -0,0 +1,171 @@
"""Adapted from the OpenCV optical flow documentation code: https://docs.opencv.org/4.5.3/d4/dee/tutorial_optical_flow.html"""

""" ABOUT-----------------------------------------------------------------------
Lucas-Kanade Optical Flow calculates the optical flow (motion) of specific features in a video clip.
Shi-Tomasi corner detection is used to pick points in the video that are easy to track.
The optical flow algorithm then tracks where those features move.
The visualization draws a point over each tracked feature and a trail showing where the feature has been.
The program currently writes the result video to the same location as the input video, with the name <input_vid_filename>_LK_FLOW.mp4

The idea is that the data about how certain pixels/features move across the screen could perhaps be used to figure out how the player camera / aim was changing.
"""

import cv2 as cv
import numpy as np

# PARAMETERS------------------------------------------------------------------

# path to input video file
vidpath = r""

# do you want to save the video?
savevid = True

# do you want to preview the output?
previewWindow = True

# output video params
fps = 20 # fps of output video, should match input video

# visualization parameters
numPts = 5 # max number of points to track
trailLength = 60 # how many frames to keep a fading trail behind a tracked point to show motion
trailThickness = 8 # thickness of the trail to draw behind the target
trailFade = 4 # the intensity at which the trail fades
pointSize = 15 # pixel radius of the circle to draw over tracked points

# params for Shi-Tomasi corner detection
shitomasi_params = {
    "qualityLevel": 0.3,
    "minDistance": 7,
    "blockSize": 7
}

# params for Lucas-Kanade optical flow
LK_params = {
    "winSize": (15,15),
    "maxLevel": 2,
    "criteria": (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03)
}


# SETUP -----------------------------------------------------------------------

# generate random colors
color = np.random.randint(0,255,(100,3))

# read the video file into memory
cap = cv.VideoCapture(vidpath)

# get the first frame
_, old_frame = cap.read()
old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)

# get resolution of video
res_x = len(old_frame[0])
res_y = len(old_frame)

# create crosshair mask
crosshair_bottom = int(0.7*res_y)
crosshair_top = int(0.3*res_y)
crosshair_left = int(0.3*res_x)
crosshair_right = int(0.7*res_x)
crosshairmask = np.zeros(old_frame.shape[:2], dtype="uint8")
cv.rectangle(crosshairmask, (crosshair_left, crosshair_top), (crosshair_right, crosshair_bottom), 255, -1)

# create trail history buffers for drawing purposes
trail_history = [[[(0,0), (0,0)] for i in range(trailLength)] for i in range(numPts)]

# get features from first frame
print(f"\nRunning Optical Flow on: {vidpath}")
old_points = cv.goodFeaturesToTrack(old_gray, maxCorners=numPts, mask=crosshairmask, **shitomasi_params)

# if saving video
if savevid:
    # path to save output video
    savepath = vidpath.rsplit('.', 1)[0] + '_LK_FLOW' + '.mp4'
    print(f"Saving output video to: {savepath}")

    # get shape of video frames
    height, width, channels = old_frame.shape

    # setup videowriter object
    fourcc = cv.VideoWriter_fourcc(*'mp4v')
    videoOut = cv.VideoWriter(savepath, fourcc, fps, (width, height))

# PROCESS VIDEO ---------------------------------------------------------------
while True:
    # get next frame
    stillGoing, new_frame = cap.read()

    # if video is over, quit
    if not stillGoing:
        break

    # convert to grayscale
    new_frame_gray = cv.cvtColor(new_frame, cv.COLOR_BGR2GRAY)

    # calculate optical flow
    new_points, st, err = cv.calcOpticalFlowPyrLK(old_gray, new_frame_gray, old_points, None, **LK_params)

    # select good points
    if new_points is not None:
        good_new = new_points[st==1]
        good_old = old_points[st==1]

    # create trail mask to add to image
    trailMask = np.zeros_like(old_frame)

    # calculate motion lines and points
    for i,(new,old) in enumerate(zip(good_new, good_old)):
        # flatten coords
        a,b = new.ravel()
        c,d = old.ravel()

        # list of the prev and current points converted to int
        linepts = [(int(a),int(b)), (int(c),int(d))]

        # add points to the trail history
        trail_history[i].insert(0, linepts)

        # get color for this point
        pointColor = color[i].tolist()

        # add trail lines
        for j in range(len(trail_history[i])):
            # fading colors, clamped so channel values never drop below 0
            trailColor = [max(int(pointColor[0] - (trailFade*j)), 0), max(int(pointColor[1] - (trailFade*j)), 0), max(int(pointColor[2] - (trailFade*j)), 0)]
            trailMask = cv.line(trailMask, trail_history[i][j][0], trail_history[i][j][1], trailColor, thickness=trailThickness, lineType=cv.LINE_AA)

        # get rid of the oldest trail segment
        trail_history[i].pop()

        # add circle over the point
        new_frame = cv.circle(new_frame, trail_history[i][0][0], pointSize, color[i].tolist(), -1)

    # add trail to frame
    img = cv.add(new_frame, trailMask)

    # show the frames
    if previewWindow:
        cv.imshow('optical flow', img)

    # write frames to new output video
    if savevid:
        videoOut.write(img)

    # kill window if ESC is pressed
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break

    # update previous frame and previous points
    old_gray = new_frame_gray.copy()
    old_points = good_new.reshape(-1,1,2)

    # if fewer than numPts points are being tracked, look for new points
    if (numPts - len(old_points)) > 0:
        old_points = cv.goodFeaturesToTrack(old_gray, maxCorners=numPts, mask=crosshairmask, **shitomasi_params)

# cleanup
cap.release()
if savevid:
    videoOut.release()
cv.destroyAllWindows()

# after video is finished
print('\nComplete!\n')
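
As with the Farneback script, the ABOUT note floats the idea of using this motion data to recover camera / aim changes, but the commit only draws the visualization. A minimal sketch of one way to take that further (not part of the commit; the helper name and the median aggregation are illustrative assumptions): collapse the per-feature displacements the loop already computes (good_new - good_old) into a single per-frame vector, using the median so that features stuck to moving objects have less influence than they would with the mean.

import numpy as np

def estimate_aim_delta(good_old, good_new):
    """Reduce per-feature Lucas-Kanade displacements to one (dx, dy) per frame.

    good_old / good_new are the (N, 2) arrays the loop above builds after
    filtering on st == 1. Median aggregation is an assumption, not something
    the original script does.
    """
    if len(good_old) == 0:
        return 0.0, 0.0
    deltas = good_new - good_old  # per-feature motion between frames
    return float(np.median(deltas[:, 0])), float(np.median(deltas[:, 1]))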

0 commit comments
