-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
337 lines (259 loc) · 12.1 KB
/
app.py
File metadata and controls
337 lines (259 loc) · 12.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
from flask import Flask, render_template, request, jsonify, send_from_directory
import os
import cv2
import numpy as np
import pandas as pd
import torch
from werkzeug.utils import secure_filename
import threading
import json
from datetime import datetime
# Import existing modules without modification
from utils import (read_video, save_video, match_shots_with_bounces, save_to_csv)
from court_line_detector import CourtLineDetector
from TrackNet import (BallTrackerNet, infer_model, remove_outliers, split_track,
interpolation, write_track, interpolate_full_track, BounceDetector)
import gc
app = Flask(__name__)

# Configuration
UPLOAD_FOLDER = 'uploads'        # incoming raw videos land here
OUTPUT_FOLDER = 'output_videos'  # annotated videos / zone images are written here
ALLOWED_EXTENSIONS = {'mp4', 'avi', 'mov'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 500 * 1024 * 1024 # 500MB max file size

# Ensure folders exist
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
os.makedirs(OUTPUT_FOLDER, exist_ok=True)

# Global variable to track processing status.
# Single-worker design: at most one video is analysed at a time. The
# background thread (process_video) writes this dict; the /status and
# /upload routes read it.
processing_status = {
    'is_processing': False,  # True while the background thread is running
    'progress': 0,           # 0-100 percent
    'stage': 'idle',         # machine-readable pipeline stage name
    'message': 'Ready',      # human-readable status line
    'video_id': None,        # basename (no extension) of the current video
    'error': None            # last error message, or None
}
def allowed_file(filename):
    """Return True if *filename* carries an extension in ALLOWED_EXTENSIONS.

    The comparison is case-insensitive and only the text after the last
    dot counts; a name with no dot at all is rejected.
    """
    _, dot, extension = filename.rpartition('.')
    return bool(dot) and extension.lower() in ALLOWED_EXTENSIONS
def run_shot_classification_safe(video_path, model_path, left_handed=True):
    """Safely run shot classification with TensorFlow isolation.

    Hides the GPU from TensorFlow (so it cannot grab CUDA memory that
    PyTorch is using) by blanking CUDA_VISIBLE_DEVICES for the duration
    of the call, then restores the variable to its exact prior state.

    Args:
        video_path: path of the video to classify.
        model_path: path of the RNN model file.
        left_handed: passed through to the classifier.

    Returns:
        The classifier's result, or an empty list on any failure.
    """
    # Capture the true prior state: None means "was not set at all".
    # (The old code defaulted to '0', which wrongly *created* the variable
    # on restore when it had never existed.)
    original_cuda_visible = os.environ.get('CUDA_VISIBLE_DEVICES')
    os.environ['CUDA_VISIBLE_DEVICES'] = ''  # Hide GPU from TensorFlow
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Reduce TF logging
    try:
        # Import TensorFlow-based shot classification only when needed,
        # so TF reads CUDA_VISIBLE_DEVICES='' at import time.
        from tennis_shot_recognition import run_shot_classification
        # Run shot classification on CPU
        return run_shot_classification(video_path, model_path, left_handed)
    except Exception as e:
        print(f"Error in shot classification: {e}")
        return []
    finally:
        # Restore CUDA visibility for PyTorch: re-set the old value, or
        # remove the variable entirely if it was previously unset.
        if original_cuda_visible is None:
            os.environ.pop('CUDA_VISIBLE_DEVICES', None)
        else:
            os.environ['CUDA_VISIBLE_DEVICES'] = original_cuda_visible
def update_status(stage, message, progress):
    """Record the current pipeline stage in the shared status dict and log it.

    Args:
        stage: machine-readable stage name (e.g. 'court_detection').
        message: human-readable status line for the UI.
        progress: overall completion percentage, 0-100.
    """
    global processing_status
    # Overwrite the three live fields in one call; other keys are untouched.
    processing_status.update(stage=stage, message=message, progress=progress)
    print(f"[{stage}] {message} ({progress}%)")
def process_video(video_path, left_handed=True):
    """Process video using existing main.py logic without modification.

    Full analysis pipeline, intended to run in a background thread:
    read video -> detect court keypoints -> divide opponent court into
    zones -> track the ball (TrackNet) -> detect far-side bounces ->
    annotate frames -> save video -> classify shots (TensorFlow) ->
    match shots with bounces and save a CSV.

    Progress and errors are reported through the module-level
    ``processing_status`` dict via ``update_status``.

    Args:
        video_path: path of the uploaded video file.
        left_handed: passed through to the shot classifier.

    Raises:
        Re-raises any exception after recording it in processing_status.
    """
    global processing_status
    try:
        processing_status['is_processing'] = True
        processing_status['error'] = None

        # Extract video ID (file basename without extension) used to name
        # the saved CSV results.
        video_filename = os.path.basename(video_path)
        video_id = os.path.splitext(video_filename)[0]
        processing_status['video_id'] = video_id

        update_status('reading', 'Reading video file...', 5)
        # Read video and create video ID
        video_frames, fps = read_video(video_path)
        frame_height = video_frames[0].shape[0]

        update_status('court_detection', 'Detecting tennis court...', 15)
        # === Court detection ===
        court_model_path = "models/keypoints_model.pth"
        court_line_detector = CourtLineDetector(court_model_path)
        COURT_DETECTION_INTERVAL = 30 # Detect court every 30 frames
        all_keypoints = []
        last_valid_keypoints = None
        total_frames = len(video_frames)
        for i, frame in enumerate(video_frames):
            # Run the (expensive) detector only every N frames; reuse the
            # last good keypoints in between. The camera is presumably
            # mostly static — TODO confirm this holds for all inputs.
            if i % COURT_DETECTION_INTERVAL == 0 or last_valid_keypoints is None:
                raw_keypoints = court_line_detector.predict(frame)
                edges = court_line_detector.get_edges(frame)
                lines = court_line_detector.get_hough_lines(edges)
                court_keypoints = court_line_detector.snap_keypoints_to_lines(raw_keypoints, lines)
                last_valid_keypoints = court_keypoints
            else:
                court_keypoints = last_valid_keypoints
            all_keypoints.append(court_keypoints)
            # Update progress (court detection spans the 15-30% band)
            if i % 100 == 0:
                progress = 15 + int((i / total_frames) * 15)
                update_status('court_detection', f'Detecting court... ({i}/{total_frames} frames)', progress)
        # Reclaim memory after the detection loop.
        gc.collect()

        update_status('court_zones', 'Dividing court into zones...', 35)
        # Divide opponent side of the court into 4 zones
        # (uses the keypoints from the last detection pass)
        divided_img, opp_court_zones = court_line_detector.divide_opp_court(video_frames[0].copy(), court_keypoints)
        cv2.imwrite("output_videos/opponent_court_zones.jpg", divided_img)

        update_status('ball_tracking', 'Tracking tennis ball...', 40)
        # === TrackNet Ball tracking ===
        ball_model = BallTrackerNet()
        device = 'cuda' if torch.cuda.is_available() and torch.cuda.device_count() > 0 else 'cpu'
        print(f"Using device: {device}")
        ball_model.load_state_dict(torch.load('models/TrackNet.pt', map_location=device))
        ball_model = ball_model.to(device)
        ball_model.eval()
        with torch.no_grad():
            ball_track, dists = infer_model(video_frames, ball_model, device)

        update_status('ball_tracking', 'Processing ball trajectory...', 55)
        # Clean and fill the raw per-frame ball positions.
        ball_track = remove_outliers(ball_track, dists)
        ball_track = interpolate_full_track(ball_track)
        extrapolate = True
        if extrapolate == True:
            # Interpolate each contiguous sub-track separately.
            subtracks = split_track(ball_track)
            for r in subtracks:
                ball_subtrack = ball_track[r[0]:r[1]]
                ball_subtrack = interpolation(ball_subtrack)
                ball_track[r[0]:r[1]] = ball_subtrack
        output_frames = write_track(video_frames, ball_track)

        update_status('bounce_detection', 'Detecting ball bounces...', 65)
        # Bounce Detection (CatBoost regressor, far/opponent side only)
        path_bounce_model = "models/ctb_regr_bounce.cbm"
        bounce_detector = BounceDetector(path_bounce_model)
        x_ball = [x[0] for x in ball_track]
        y_ball = [x[1] for x in ball_track]
        opponent_bounce_frames = bounce_detector.predict_far_side_only(x_ball, y_ball, frame_height)

        # Zone bounce detection: keep only bounces that land inside one of
        # the opponent-court zones, and record which zone each hit.
        bounce_points = [(ball_track[f][0], ball_track[f][1]) for f in opponent_bounce_frames]
        valid_points, bounce_zones = court_line_detector.get_bounce_zones(bounce_points, opp_court_zones)
        opponent_bounce_frames = [
            frame for frame, point in zip(opponent_bounce_frames, bounce_points)
            if point in valid_points
        ]
        print("Filtered opponent-side bounces frames:", opponent_bounce_frames)
        print("Filtered bounce zones:", bounce_zones)

        update_status('annotating', 'Annotating video...', 70)
        # Draw per-frame keypoints and a frame counter overlay
        for i, (frame, keypoints) in enumerate(zip(output_frames, all_keypoints)):
            output_frames[i] = court_line_detector.draw_keypoints(frame, keypoints)
            cv2.putText(output_frames[i], f"Frame: {i}", (10,30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        # Clean up GPU memory before TensorFlow usage
        if device == 'cuda':
            del ball_model
            torch.cuda.empty_cache()

        update_status('saving_video', 'Saving annotated video...', 75)
        # Save video; verify it is readable, otherwise fall back to the
        # original upload for the classification step.
        output_video_path = "output_videos/output_video.avi"
        try:
            save_video(output_frames, output_video_path)
            if os.path.exists(output_video_path):
                test_cap = cv2.VideoCapture(output_video_path)
                if test_cap.isOpened():
                    test_cap.release()
                    video_path_for_classification = output_video_path
                else:
                    video_path_for_classification = video_path
            else:
                video_path_for_classification = video_path
        except Exception as e:
            print(f"Error saving video: {e}")
            video_path_for_classification = video_path

        update_status('shot_classification', 'Classifying tennis shots...', 85)
        # Run shot classification with TensorFlow isolation (CPU only;
        # see run_shot_classification_safe)
        rnn_model_path = "models/tennis_rnn.h5"
        detected_shots = run_shot_classification_safe(video_path_for_classification, rnn_model_path, left_handed)
        print("Detected shots:", detected_shots)

        update_status('saving_results', 'Saving results...', 95)
        # === Match shots with bounces and save to CSV ===
        if detected_shots and opponent_bounce_frames and bounce_zones:
            matched_data = match_shots_with_bounces(detected_shots, opponent_bounce_frames, bounce_zones)
            save_to_csv(matched_data, video_id)
        else:
            # Explain which input was missing instead of failing silently.
            if not detected_shots:
                print("Warning: No shots detected.")
            if not opponent_bounce_frames:
                print("Warning: No opponent bounce frames detected.")
            if not bounce_zones:
                print("Warning: No bounce zones detected.")

        update_status('complete', f'Analysis complete! Results saved for {video_id}', 100)
        processing_status['is_processing'] = False
    except Exception as e:
        # Record the failure for the /status endpoint, then re-raise so the
        # thread's traceback is still visible in the server log.
        processing_status['error'] = str(e)
        processing_status['is_processing'] = False
        update_status('error', f'Error: {str(e)}', 0)
        print(f"Error processing video: {e}")
        raise
@app.route('/')
def index():
    """Serve the main page (templates/index.html)."""
    return render_template('index.html')
@app.route('/upload', methods=['POST'])
def upload_file():
    """Handle video upload and start processing.

    Expects a multipart form with a 'video' file field and an optional
    'left_handed' flag ('true'/'false'). The analysis runs in a daemon
    background thread so this endpoint returns immediately; clients poll
    /status for progress.

    Returns:
        JSON success payload, or a JSON error with HTTP 400.
    """
    global processing_status
    # Single-worker design: process_video mutates shared global state,
    # so reject concurrent uploads.
    if processing_status['is_processing']:
        return jsonify({'error': 'A video is already being processed'}), 400
    if 'video' not in request.files:
        return jsonify({'error': 'No video file provided'}), 400
    file = request.files['video']
    left_handed = request.form.get('left_handed', 'false').lower() == 'true'
    if file.filename == '':
        return jsonify({'error': 'No file selected'}), 400
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        # secure_filename can strip a hostile or non-ASCII-only name down
        # to '' — fall back to a generated name, keeping the (already
        # validated) extension so downstream readers recognise the format.
        if not filename:
            ext = file.filename.rsplit('.', 1)[1].lower()
            filename = f"upload_{datetime.now().strftime('%Y%m%d%H%M%S')}.{ext}"
        filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(filepath)
        # Start processing in a separate thread; daemon so an in-flight
        # analysis cannot block interpreter shutdown.
        thread = threading.Thread(target=process_video, args=(filepath, left_handed))
        thread.daemon = True
        thread.start()
        return jsonify({
            'message': 'Video uploaded successfully. Processing started.',
            'filename': filename
        })
    return jsonify({'error': 'Invalid file type. Please upload MP4, AVI, or MOV'}), 400
@app.route('/status')
def get_status():
    """Return the current processing status dict as JSON (polled by the UI)."""
    return jsonify(processing_status)
@app.route('/results/<filename>')
def download_result(filename):
    """Download a result file from OUTPUT_FOLDER.

    send_from_directory rejects path-traversal attempts in *filename*.
    """
    return send_from_directory(OUTPUT_FOLDER, filename)
@app.route('/results')
def list_results():
    """List available result files as JSON.

    Scans OUTPUT_FOLDER for regular files and also reports the top-level
    CSV results file if present. Each entry carries name, byte size, and
    last-modified timestamp (ISO 8601).
    """
    def describe(path, name):
        # One metadata record per result file.
        return {
            'name': name,
            'size': os.path.getsize(path),
            'modified': datetime.fromtimestamp(os.path.getmtime(path)).isoformat()
        }

    try:
        entries = []
        if os.path.exists(OUTPUT_FOLDER):
            for name in os.listdir(OUTPUT_FOLDER):
                candidate = os.path.join(OUTPUT_FOLDER, name)
                if os.path.isfile(candidate):
                    entries.append(describe(candidate, name))
        # Also check for CSV results written next to the app.
        csv_path = 'tennis_analysis_results.csv'
        if os.path.exists(csv_path):
            entries.append(describe(csv_path, 'tennis_analysis_results.csv'))
        return jsonify({'files': entries})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
if __name__ == '__main__':
    # Startup banner with connection info.
    print("="*60)
    print("\nStarting web server...")
    print("\nAccess the application at: http://localhost:5001")
    print("\nPress Ctrl+C to stop the server")
    print("="*60)
    # threaded=True lets /status polls be answered while an upload is in
    # flight; the heavy analysis itself runs in its own background thread.
    app.run(debug=False, host='127.0.0.1', port=5001, threaded=True)