
Commit 4dc4c1d

feat: enhance video analysis pipeline with configuration loading and error handling
1 parent ca74621 commit 4dc4c1d

File tree

1 file changed

+119 -14 lines changed


app/video_pipeline.py

Lines changed: 119 additions & 14 deletions
@@ -1,27 +1,132 @@
-import time, threading
 
-def run_basic(video_path:str):
-    # Quick metadata
-    return {"duration": 123.0}
+import threading
+import yaml
+import traceback
+from src.processors.video_analyzer import VideoAnalyzer
+from src.processors.audio_analyzer import AudioAnalyzer
+from src.processors.scene_detector import SceneDetector
+from src.ai_models.emotion_detector import EmotionDetector
+from src.ai_models.intelligent_content_analyzer import IntelligentContentAnalyzer
+from src.suggestions.cut_suggester import CutSuggester
+import os
 
-def run_deep(video_path:str):
-    # Simulate heavy analysis
-    time.sleep(1.2)
-    return {
-        "emotions": {"happy":0.62,"neutral":0.25,"sad":0.13},
-        "objects": [{"label":"person","confidence":0.94}],
-        "cut_suggestions":[{"timestamp":28.4,"reason":"Scene shift","confidence":0.78}]
-    }
+def load_config():
+    with open(os.path.join(os.path.dirname(__file__), '../config.yaml'), 'r', encoding='utf-8') as f:
+        return yaml.safe_load(f)
+
+def run_basic(video_path:str, config):
+    # Quick metadata using VideoAnalyzer
+    try:
+        va = VideoAnalyzer(config)
+        meta = va.get_video_metadata(video_path)
+        return {"duration": meta.get("duration", 0), "status": "basic_ready"}
+    except Exception as e:
+        return {"duration": 0, "error": str(e), "status": "basic_failed"}
+
+def run_deep(video_path:str, config):
+    # Full pipeline: video, audio, scene, emotion, content, cut suggestions
+    try:
+        va = VideoAnalyzer(config)
+        aa = AudioAnalyzer(config)
+        sd = SceneDetector(config)
+        ed = EmotionDetector(config)
+        ca = IntelligentContentAnalyzer(config)
+        cs = CutSuggester(config)
+
+        # Video analysis
+        video_meta = va.get_video_metadata(video_path)
+        video_timeline = va.analyze_video_timeline(video_path)
+
+        # Audio extraction and analysis
+        audio_path = aa.extract_audio_from_video(video_path)
+        audio_features = aa.extract_audio_features(audio_path)
+        speech_emotions = aa.analyze_speech_emotion(audio_path)
+        speaker_changes = aa.detect_speaker_changes(audio_path)
+        energy_timeline = aa.analyze_audio_energy(audio_path)
+
+        # Scene detection
+        scenes = sd.detect_scenes(video_path, method='combined')
+
+        # Emotion detection (fuse text/audio/visual if available)
+        # For demo, use only speech_emotions and visual features
+        emotion_scores = {}
+        if speech_emotions:
+            # Use dominant emotion per chunk
+            for seg in speech_emotions:
+                for k, v in seg.get('all_emotions', {}).items():
+                    emotion_scores[k] = emotion_scores.get(k, 0) + v
+            # Normalize
+            total = sum(emotion_scores.values())
+            if total > 0:
+                emotion_scores = {k: v/total for k, v in emotion_scores.items()}
+        else:
+            emotion_scores = {"neutral": 1.0}
+
+        # Content analysis
+        content_analysis = ca.analyze_content(
+            video_path,
+            audio_or_text={"features": audio_features, "speech_emotions": speech_emotions, "energy_timeline": energy_timeline},
+            visual_or_metadata=video_timeline
+        )
+
+        # Cut suggestions
+        video_analysis = {
+            "scene_changes": scenes,
+            "timeline": video_timeline
+        }
+        audio_analysis = {
+            "features": audio_features,
+            "speech_emotions": speech_emotions,
+            "speaker_changes": speaker_changes,
+            "energy_timeline": energy_timeline
+        }
+        script_analysis = {} # Placeholder for future script/subtitle analysis
+        cut_suggestions = cs.generate_suggestions(video_analysis, script_analysis, audio_analysis)
+
+        # Objects (from video timeline visual features)
+        objects = []
+        if video_timeline.get('visual_features'):
+            # For demo, treat high-confidence visual concepts as objects
+            for feats in video_timeline['visual_features']:
+                for k, v in feats.items():
+                    if v > 0.7:
+                        objects.append({"label": k, "confidence": v})
+        # Deduplicate
+        seen = set()
+        unique_objects = []
+        for o in objects:
+            if o['label'] not in seen:
+                unique_objects.append(o)
+                seen.add(o['label'])
+
+        # Clean up temp audio
+        try:
+            aa.cleanup_temp_files([audio_path])
+        except Exception:
+            pass
+
+        return {
+            "duration": video_meta.get("duration", 0),
+            "emotions": emotion_scores,
+            "objects": unique_objects,
+            "cut_suggestions": [dict(timestamp=s.timestamp, reason=s.reason, confidence=s.confidence) for s in cut_suggestions],
+            "scene_changes": scenes,
+            "content": content_analysis,
+            "status": "deep_ready"
+        }
+    except Exception as e:
+        return {"error": str(e), "traceback": traceback.format_exc(), "status": "deep_failed"}
 
 class VideoPipeline:
     def __init__(self, on_partial, on_complete):
         self.on_partial = on_partial
         self.on_complete = on_complete
+        self.config = load_config()
 
     def start(self, video_path:str):
         def worker():
-            basic = run_basic(video_path)
+            basic = run_basic(video_path, self.config)
             self.on_partial(basic)
-            deep = run_deep(video_path)
+            deep = run_deep(video_path, self.config)
             self.on_complete(deep)
         threading.Thread(target=worker, daemon=True).start()
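
Two details in run_deep are worth a closer look. First, the emotion fusion is a plain sum-and-normalize over each segment's all_emotions dictionary (despite the inline comment about the dominant emotion per chunk, the code sums every emotion). Below is a minimal standalone sketch of that step with hypothetical segment data; the real segments come from AudioAnalyzer.analyze_speech_emotion, which this commit does not show:

def fuse_emotions(speech_emotions):
    # Sum each emotion's score across all segments, then normalize so
    # the result is a distribution that sums to 1.0.
    scores = {}
    for seg in speech_emotions:
        for emotion, value in seg.get('all_emotions', {}).items():
            scores[emotion] = scores.get(emotion, 0) + value
    total = sum(scores.values())
    if total > 0:
        return {k: v / total for k, v in scores.items()}
    return {"neutral": 1.0}  # fallback when nothing was accumulated

# Hypothetical two-segment input:
segments = [
    {"all_emotions": {"happy": 0.8, "neutral": 0.2}},
    {"all_emotions": {"happy": 0.4, "sad": 0.6}},
]
print(fuse_emotions(segments))  # {'happy': 0.6, 'neutral': 0.1, 'sad': 0.3}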

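
Second, the object deduplication keeps the first occurrence of each label, not the highest-confidence one: frame order, not score, decides which detection survives. A small sketch of that first-seen behavior (the detection dicts are hypothetical):

def dedupe_first_seen(objects):
    # Keep only the first entry per label, as run_deep does.
    seen = set()
    unique = []
    for obj in objects:
        if obj['label'] not in seen:
            unique.append(obj)
            seen.add(obj['label'])
    return unique

detections = [
    {"label": "person", "confidence": 0.94},
    {"label": "person", "confidence": 0.97},  # higher score, but seen second
    {"label": "dog", "confidence": 0.75},
]
print(dedupe_first_seen(detections))
# [{'label': 'person', 'confidence': 0.94}, {'label': 'dog', 'confidence': 0.75}]

Sorting the detections by confidence (descending) before deduplicating would make the survivor the best-scoring detection instead of the earliest one.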
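
Finally, a sketch of how a caller might drive the updated pipeline. The callback wiring and status values come from this file; the handler bodies, the video path, and the assumption that app/ is importable as a package are placeholders:

from app.video_pipeline import VideoPipeline  # assumes app/ is on the import path
import time

def on_partial(result):
    # Fast path: metadata arrives first, with status "basic_ready" or "basic_failed".
    print("partial:", result)

def on_complete(result):
    # Slow path: the full analysis, with status "deep_ready" or "deep_failed".
    print("complete:", result)

pipeline = VideoPipeline(on_partial, on_complete)  # load_config() runs here
pipeline.start("sample.mp4")  # placeholder path

# start() runs its worker on a daemon thread, so a short-lived script must
# keep the main thread alive long enough for on_complete to fire.
time.sleep(60)

Note that VideoPipeline.__init__ now calls load_config() with no exception handling of its own, so a missing or malformed config.yaml raises at construction time rather than being reported through the worker's status dicts.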