|
import contextlib
import os
import time

import pandas as pd
import plotly.express as px
import streamlit as st

from backend import ModalXSystem
from emotion_engine import EmotionAnalyzer
5 | 8 |
|
6 | 9 | st.set_page_config( |
7 | 10 | page_title="DIU Presentation Grader", |
|
12 | 15 |
|
13 | 16 | st.markdown(""" |
14 | 17 | <style> |
15 | | - .stApp { |
16 | | - background-color: #0E1117; |
17 | | - color: #FAFAFA; |
18 | | - } |
19 | | - |
20 | | - h1, h2, h3, h4, h5, h6, p, li, span { |
21 | | - color: #FAFAFA !important; |
22 | | - } |
23 | | - .stMarkdown, .stText { |
24 | | - color: #E0E0E0 !important; |
25 | | - } |
| 18 | + .stApp { background-color: #0E1117; color: #FAFAFA; } |
| 19 | + h1, h2, h3, h4, h5, h6, p, li, span { color: #FAFAFA !important; } |
| 20 | + .stMarkdown, .stText { color: #E0E0E0 !important; } |
26 | 21 | |
27 | 22 | .main-header { |
28 | 23 | background: linear-gradient(135deg, #4b6cb7 0%, #182848 100%); |
|
85 | 80 |
|
86 | 81 | st.markdown('<div class="main-header"><h1>🎓 DIU Smart Faculty Grader</h1><p>AI-Powered Multi-Modal Presentation Assessment</p></div>', unsafe_allow_html=True) |
87 | 82 |
|
@st.cache_resource
def load_emotion_engine():
    """Build the emotion-analysis model once and reuse it across reruns.

    ``st.cache_resource`` keeps a single ``EmotionAnalyzer`` instance alive
    for the server session, so the (presumably expensive) model load does
    not repeat on every Streamlit script rerun.
    """
    return EmotionAnalyzer()
88 | 87 | with st.sidebar: |
89 | 88 | st.header("📋 Student Details") |
90 | 89 | s_name = st.text_input("Student Name", placeholder="e.g. Muntasir Islam") |
|
126 | 125 | with st.spinner("⚡ Booting AI Engine..."): |
127 | 126 | st.session_state.modalx = ModalXSystem() |
128 | 127 |
|
# Load the cached emotion model up front; if it fails, surface the error and
# halt the script (st.stop) so later code never sees an unbound engine.
try:
    emotion_engine = load_emotion_engine()
except Exception as e:
    st.error(f"Could not load Emotion Model: {e}")
    st.stop()
129 | 134 | proc_col1, proc_col2 = st.columns([1, 1]) |
130 | 135 |
|
131 | 136 | with proc_col1: |
|
136 | 141 | else: |
137 | 142 | st.video(video_path) |
138 | 143 |
|
# Pre-declare the analysis outputs so the result/emotion sections below can
# safely test them even when a stage fails or is skipped.
results = None
emotion_results = None
with proc_col2:
    with st.status("🚀 ModalX Engine Running...", expanded=True) as status:
        st.write("🔄 Initializing Neural Networks...")
        # NOTE(review): looks like a cosmetic pause so the status steps are
        # visible to the user — confirm it is not load-bearing.
        time.sleep(1)

        # Stage 1: general rubric assessment (speech-to-text + visual scan).
        st.write("🧠 Running General Assessment (Whisper + CV)...")
        try:
            results = st.session_state.modalx.analyze(video_path, s_name, s_id, is_url)
        except Exception as e:
            # Keep going even if the general engine fails so the emotion
            # pass below can still run; `results` stays None on this path.
            st.error(f"General Engine Error: {e}")

        # Stage 2: emotion analysis requires a local media file, so it is
        # skipped (with a warning) for external-URL sources.
        if video_path and os.path.exists(video_path) and not is_url:
            st.write("🎭 Analyzing Emotional Tones (CNN-1D)...")
            try:
                e_times, e_emotions, e_summary = emotion_engine.predict(video_path)
                emotion_results = {
                    "times": e_times,
                    "emotions": e_emotions,
                    "summary": e_summary
                }
            except Exception as e:
                # Emotion analysis is best-effort; failure downgrades to a
                # warning rather than aborting the whole run.
                st.warning(f"Emotion Analysis skipped: {e}")
        elif is_url:
            st.warning("⚠️ Emotion Graph unavailable for external URLs (Download required)")

        # Final status reflects only the general assessment; emotion-pass
        # failures alone do not mark the run as failed.
        if results:
            status.update(label="✅ Analysis Complete!", state="complete", expanded=False)
        else:
            status.update(label="❌ Analysis Failed", state="error")
|
160 | 177 | if results: |
161 | 178 | st.divider() |
162 | 179 | st.balloons() |
163 | 180 |
|
164 | 181 | score = results['score'] |
| 182 | + |
165 | 183 | grade = "F" |
166 | 184 | grade_bg = "#dc3545" |
167 | 185 | if score >= 80: grade, grade_bg = "A+", "#198754" |
|
191 | 209 |
|
192 | 210 | st.divider() |
193 | 211 |
|
194 | | - tab1, tab2 = st.tabs(["📊 Performance Metrics", "📝 Transcript & Feedback"]) |
# Results are split across three views; the middle tab hosts the
# emotion-analysis visuals built in proc_col2.
tab1, tab2, tab3 = st.tabs(["📊 Performance Metrics", "🎭 Emotional Intelligence", "📝 Transcript & Feedback"])
195 | 213 |
|
196 | 214 | with tab1: |
197 | 215 | col_a, col_b = st.columns(2) |
|
217 | 235 | with col_b: |
218 | 236 | visual = results['metrics']['visual'] |
219 | 237 |
|
220 | | - if visual['is_slide_mode']: |
| 238 | + if visual.get('is_slide_mode', False): |
221 | 239 | st.subheader("🖼️ Slide Design AI") |
222 | 240 | st.info("Scanner Mode: Slide Presentation") |
223 | 241 | slides = results['metrics']['slides'] |
|
237 | 255 | st.progress(float(visual['posture_score']/100)) |
238 | 256 |
|
with tab2:
    # Emotion results exist only when the CNN pass ran successfully on a
    # local file (skipped for URLs and on engine errors).
    # NOTE(review): truthiness of emotion_results['times'] assumes a plain
    # list; if EmotionAnalyzer.predict returns a numpy array this raises —
    # TODO confirm the return type.
    if emotion_results and emotion_results['times']:
        st.subheader("📈 Emotional Flow Over Time")

        e_times = emotion_results['times']
        e_emotions = emotion_results['emotions']
        e_summary = emotion_results['summary']

        # Timeline scatter: one point per analysed window, joined by a thin
        # grey line so tone shifts over time are easy to follow.
        df = pd.DataFrame({"Time (s)": e_times, "Emotion": e_emotions})
        # sorted(set(...)) directly — the extra list() wrapper was redundant.
        emotion_order = sorted(set(e_emotions))

        fig = px.scatter(
            df, x="Time (s)", y="Emotion", color="Emotion",
            size=[15] * len(df), template="plotly_dark",
            category_orders={"Emotion": emotion_order},
            title="Speaker Emotion Timeline"
        )
        fig.update_traces(mode='lines+markers', line=dict(width=1, color='gray'))
        fig.update_layout(height=400, paper_bgcolor="#0E1117", plot_bgcolor="#0E1117")
        st.plotly_chart(fig, use_container_width=True)

        c_pie, c_dom = st.columns([1, 1])
        with c_pie:
            st.markdown("##### Emotion Distribution")
            fig_pie = px.pie(
                names=list(e_summary.keys()),
                values=list(e_summary.values()),
                hole=0.4, template="plotly_dark"
            )
            fig_pie.update_layout(paper_bgcolor="#0E1117")
            st.plotly_chart(fig_pie, use_container_width=True)

        with c_dom:
            # Dominant tone = key with the largest summary value.
            # assumes e_summary is non-empty whenever e_times is — TODO
            # confirm against EmotionAnalyzer.predict.
            dom_emotion = max(e_summary, key=e_summary.get)
            st.markdown("##### Analysis")
            st.metric("Dominant Tone", dom_emotion.upper())

            # Label variants ('surprise'/'surprised', 'angry'/'anger') are
            # both listed to tolerate different emotion-model label sets.
            if dom_emotion in ['happy', 'neutral', 'surprise', 'surprised']:
                st.success("The speaker maintains a **Positive/Confident** tone.")
            elif dom_emotion in ['fear', 'sad']:
                st.warning("The speaker seems **Nervous or Low Energy**. Needs more enthusiasm.")
            elif dom_emotion in ['angry', 'disgust', 'anger']:
                st.error("The speaker sounds **Aggressive/Frustrated**. Needs a softer tone.")
    else:
        st.info("Emotion analysis is not available for this file type or URL.")
| 302 | + |
| 303 | + with tab3: |
240 | 304 | fb_col1, fb_col2 = st.columns([2, 1]) |
241 | 305 |
|
242 | 306 | with fb_col1: |
|
259 | 323 | mime="application/pdf" |
260 | 324 | ) |
261 | 325 |
|
262 | | - if video_path and os.path.exists(video_path): |
# Clean up the locally saved media file; URL sources never produce one.
# EAFP: attempt the removal and tolerate an already-missing file instead of
# the racy exists()-then-remove() check.
if video_path and not is_url:
    with contextlib.suppress(FileNotFoundError):
        os.remove(video_path)
0 commit comments