Commit 75fc2f8

feat: Add evaluation metrics module with accuracy, precision, and recall calculations
This PR adds a comprehensive evaluation metrics module for the Medical Appointment No-Shows model, covering accuracy, precision, recall, F1-score, confusion matrix, and ROC-AUC calculations.
1 parent 8b24b72 commit 75fc2f8

Lines changed: 99 additions & 0 deletions
@@ -0,0 +1,99 @@
"""Evaluation metrics module for the Medical Appointment No-Shows model.

This module provides comprehensive evaluation metrics including accuracy,
precision, recall, F1-score, confusion matrix, and ROC-AUC score.
"""

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import (
    accuracy_score, precision_score, recall_score, f1_score,
    confusion_matrix, roc_auc_score, classification_report,
    roc_curve, auc
)
import seaborn as sns


class MetricsEvaluator:
    """Evaluate machine learning model performance with comprehensive metrics."""

    def __init__(self):
        self.metrics = {}
        self.predictions = None
        self.actual = None

    def calculate_metrics(self, y_true, y_pred, y_pred_proba=None):
        """Calculate comprehensive evaluation metrics.

        Args:
            y_true: True labels
            y_pred: Predicted labels
            y_pred_proba: Predicted probabilities (for ROC-AUC)

        Returns:
            dict: Dictionary containing all evaluation metrics
        """
        self.actual = y_true
        self.predictions = y_pred

        # Basic metrics; weighted averaging accounts for the class imbalance
        # between show and no-show appointments.
        self.metrics['accuracy'] = accuracy_score(y_true, y_pred)
        self.metrics['precision'] = precision_score(y_true, y_pred, average='weighted', zero_division=0)
        self.metrics['recall'] = recall_score(y_true, y_pred, average='weighted', zero_division=0)
        self.metrics['f1_score'] = f1_score(y_true, y_pred, average='weighted', zero_division=0)

        # ROC-AUC requires probability scores rather than hard predictions
        if y_pred_proba is not None:
            try:
                self.metrics['roc_auc'] = roc_auc_score(y_true, y_pred_proba)
            except ValueError:
                # e.g. only one class present in y_true
                self.metrics['roc_auc'] = None

        # Confusion matrix
        self.metrics['confusion_matrix'] = confusion_matrix(y_true, y_pred)

        return self.metrics

    def print_report(self):
        """Print detailed classification report."""
        if self.actual is None or self.predictions is None:
            print("No metrics calculated yet. Call calculate_metrics first.")
            return

        print("\n=== Classification Report ===")
        print(classification_report(self.actual, self.predictions))

    def display_metrics(self):
        """Display all calculated metrics."""
        if not self.metrics:
            print("No metrics calculated yet. Call calculate_metrics first.")
            return

        print("\n=== Evaluation Metrics ===")
        print(f"Accuracy: {self.metrics['accuracy']:.4f}")
        print(f"Precision: {self.metrics['precision']:.4f}")
        print(f"Recall: {self.metrics['recall']:.4f}")
        print(f"F1-Score: {self.metrics['f1_score']:.4f}")
        if self.metrics.get('roc_auc') is not None:
            print(f"ROC-AUC: {self.metrics['roc_auc']:.4f}")

    def plot_confusion_matrix(self, save_path=None):
        """Plot confusion matrix heatmap.

        Args:
            save_path: Path to save the plot (optional)
        """
        cm = self.metrics.get('confusion_matrix')
        if cm is None:
            print("No confusion matrix available.")
            return

        plt.figure(figsize=(8, 6))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
        plt.title('Confusion Matrix')
        plt.ylabel('True Label')
        plt.xlabel('Predicted Label')

        if save_path:
            plt.savefig(save_path)
        plt.show()


if __name__ == '__main__':
    print('Metrics Evaluator module for Medical Appointment No-Shows prediction model')
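
For context, a minimal usage sketch follows. The synthetic data from make_classification, the LogisticRegression classifier, the train/test split, and the confusion_matrix.png output path are illustrative assumptions and are not part of this commit; only the MetricsEvaluator API above comes from the diff.

# Hypothetical usage sketch: the classifier, synthetic data, and output path
# are assumptions for illustration; only MetricsEvaluator is from this commit.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

# Stand-in for the no-show appointment features; the real project would load its own data.
X, y = make_classification(n_samples=1000, n_features=10, weights=[0.8, 0.2], random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

model = LogisticRegression(max_iter=1000).fit(X_train, y_train)
y_pred = model.predict(X_test)
y_pred_proba = model.predict_proba(X_test)[:, 1]  # probability of the positive (no-show) class

evaluator = MetricsEvaluator()
evaluator.calculate_metrics(y_test, y_pred, y_pred_proba)
evaluator.display_metrics()
evaluator.print_report()
evaluator.plot_confusion_matrix(save_path='confusion_matrix.png')

For binary problems, roc_auc_score expects the probability of the positive class, which is why the sketch passes predict_proba(X_test)[:, 1] rather than the hard predictions.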

0 commit comments
