|
import logging
import os
from collections import Counter

import librosa
import numpy as np
import tensorflow as tf
| 6 | + |
class EmotionAnalyzer:
    """Predict per-chunk speech emotions from an audio file.

    Loads a pre-trained Keras model once at construction and classifies
    fixed-length chunks of audio by their time-averaged MFCC features.
    If the model file is missing or fails to load, the analyzer degrades
    gracefully: ``predict`` returns empty results instead of raising.
    """

    def __init__(self, model_path='modalx_emotion_model.h5'):
        self.model_path = model_path
        self.model = None  # stays None when the model cannot be loaded
        # Label order must match the model's output layer exactly.
        # NOTE(review): 'ps' vs 'surprise'/'surprised' look like near-duplicate
        # labels (TESS uses 'ps' = pleasant surprise) -- confirm against the
        # training label encoder before deduplicating.
        self.emotions = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'ps', 'sad', 'surprise', 'surprised']
        self.sample_rate = 22050  # Hz; resample target for librosa.load
        self.duration = 3         # seconds of audio per analysis chunk
        self._load_model()

    def _load_model(self):
        """Load the Keras model from disk; leave ``self.model`` as None on failure."""
        if not os.path.exists(self.model_path):
            # Best-effort design: keep running without emotion analysis,
            # but say why instead of failing silently.
            logging.getLogger(__name__).warning(
                "Emotion model not found at %s; predictions disabled", self.model_path)
            return
        try:
            self.model = tf.keras.models.load_model(self.model_path)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
            # propagate; log the traceback rather than swallowing it.
            logging.getLogger(__name__).exception(
                "Failed to load emotion model from %s", self.model_path)

    def predict(self, audio_path):
        """Classify the emotion of each fixed-length chunk of an audio file.

        Parameters
        ----------
        audio_path : str
            Path to an audio file readable by librosa.

        Returns
        -------
        tuple[list, list, dict]
            ``(times, emotions, counts)`` where ``times`` are chunk start
            offsets in seconds, ``emotions`` the predicted label per chunk,
            and ``counts`` a ``{label: count}`` summary. All empty when no
            model is loaded or the file cannot be read.
        """
        if self.model is None:
            return [], [], {}

        try:
            y_full, sr = librosa.load(audio_path, sr=self.sample_rate)
        except Exception:
            logging.getLogger(__name__).exception("Could not read audio: %s", audio_path)
            return [], [], {}

        chunk_samples = int(self.duration * sr)
        # Floor division drops a trailing partial chunk -- identical to the
        # previous int(get_duration / duration) computation, without the
        # extra librosa call (get_duration is just len(y) / sr).
        num_chunks = len(y_full) // chunk_samples

        times, emotions = [], []

        for i in range(num_chunks):
            start = i * chunk_samples
            y_chunk = y_full[start:start + chunk_samples]

            # Zero-pad a short chunk so the MFCC input length is constant.
            if len(y_chunk) < chunk_samples:
                y_chunk = np.pad(y_chunk, (0, chunk_samples - len(y_chunk)))

            # 40 MFCCs averaged over time -> a single (40,) feature vector.
            mfccs = np.mean(librosa.feature.mfcc(y=y_chunk, sr=sr, n_mfcc=40).T, axis=0)
            # Model expects shape (batch=1, 40, 1).
            pred = self.model.predict(np.expand_dims(np.expand_dims(mfccs, 0), 2), verbose=0)

            times.append(i * self.duration)
            emotions.append(self.emotions[int(np.argmax(pred))])

        return times, emotions, dict(Counter(emotions))
0 commit comments