evaluator.py
import json

import mlflow
import pandas as pd
from sklearn.metrics import accuracy_score, classification_report

from app import SentimentAnalyzer


def run_evaluation():
    # 1. Evaluation data (benchmark / gold standard).
    # The AI engineer focuses on the quality of the data used to evaluate the system.
    test_data = [
        {"text": "I love this!", "label": "POSITIVE"},
        {"text": "This is terrible.", "label": "NEGATIVE"},
        {"text": "I am so happy with the result.", "label": "POSITIVE"},
        {"text": "What a disaster.", "label": "NEGATIVE"},
        {"text": "The service was okay, not great.", "label": "NEGATIVE"},  # Hard case
    ]
    df = pd.DataFrame(test_data)

    # 2. Initialize the system (the ready-made module).
    system = SentimentAnalyzer()

    # 3. Configure MLflow (observability).
    # Explicitly write logs to the ./mlruns directory.
    mlflow.set_tracking_uri("file:./mlruns")
    mlflow.set_experiment("AI_System_Integration_Evals")

    with mlflow.start_run():
        # Log the system version as run parameters.
        mlflow.log_param("model_name", "distilbert-sst-2")
        mlflow.log_param("task", "sentiment-analysis")

        # 4. Run the evaluation.
        predictions = []
        for text in df["text"]:
            res = system.analyze(text)
            predictions.append(res["label"])

        # 5. Collect quality metrics.
        accuracy = accuracy_score(df["label"], predictions)
        report = classification_report(df["label"], predictions, output_dict=True)
        print(f"Evaluation Accuracy: {accuracy}")

        # 6. Log the results.
        mlflow.log_metric("eval_accuracy", accuracy)
        with open("eval_report.json", "w") as f:
            json.dump(report, f, indent=4)
        mlflow.log_artifact("eval_report.json")

    print("Evaluation complete. Results are in MLflow.")


if __name__ == "__main__":
    run_evaluation()
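
# ---------------------------------------------------------------------------
# Usage: run `python evaluator.py`, then browse the logged run with the MLflow
# UI, e.g. `mlflow ui --backend-store-uri file:./mlruns`.
#
# For reference, a minimal sketch of what the imported `app.SentimentAnalyzer`
# is assumed to look like, inferred from the `analyze()` call and the logged
# "distilbert-sst-2" / "sentiment-analysis" params above. The real app.py may
# differ; the pipeline-based body below is an assumption, not the actual code.
#
# from transformers import pipeline
#
# class SentimentAnalyzer:
#     def __init__(self):
#         # The default sentiment-analysis pipeline loads
#         # distilbert-base-uncased-finetuned-sst-2-english.
#         self._pipe = pipeline("sentiment-analysis")
#
#     def analyze(self, text: str) -> dict:
#         # Returns e.g. {"label": "POSITIVE", "score": 0.9998}
#         return self._pipe(text)[0]
# ---------------------------------------------------------------------------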