diff --git a/blinkDetect.py b/blinkDetect.py index f1b6768..2fde4af 100644 --- a/blinkDetect.py +++ b/blinkDetect.py @@ -1,8 +1,7 @@ # -*- coding: utf-8 -*- """ -Created on Tue Oct 29 19:51:37 2019 - -@author: Lenovo +Enhanced Blink Detection with Drunk Driver Detection and Session Tracking +Combines drowsiness detection, drunk driver indicators, and session management """ import dlib @@ -14,95 +13,187 @@ from threading import Thread import playsound import queue +import math +from collections import deque +import smtplib +from email.mime.text import MIMEText +from email.mime.multipart import MIMEMultipart from datetime import datetime +import json - -FACE_DOWNSAMPLE_RATIO = 1.5 +# Configuration Constants +FACE_DOWNSAMPLE_RATIO = 0.5 RESIZE_HEIGHT = 460 +# Original drowsiness parameters thresh = 0.27 +blinkTime = 0.15 +drowsyTime = 1.5 -# IMPORTANT: You must download the shape_predictor_68_face_landmarks.dat file from -# https://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 -# and place it in the 'models' folder +# Drunk detection parameters +HEAD_MOVEMENT_THRESHOLD = 15 # degrees +HEAD_STABILITY_WINDOW = 30 # frames +BLINK_DELAY_THRESHOLD = 1.5 # seconds +RED_EYE_THRESHOLD = 0.3 # red dominance ratio + +# File paths modelPath = "models/shape_predictor_68_face_landmarks.dat" sound_path = "alarm.wav" -detector = dlib.get_frontal_face_detector() -predictor = dlib.shape_predictor(modelPath) - +# Initialize dlib detectors +try: + detector = dlib.get_frontal_face_detector() + predictor = dlib.shape_predictor(modelPath) + print("โœ… Facial detection models loaded successfully") +except Exception as e: + print(f"โŒ Error loading models: {e}") + print("Please ensure the shape_predictor_68_face_landmarks.dat file is in the models/ directory") + sys.exit(1) + +# Eye landmark indices leftEyeIndex = [36, 37, 38, 39, 40, 41] rightEyeIndex = [42, 43, 44, 45, 46, 47] +# Original drowsiness variables blinkCount = 0 drowsy = 0 state = 0 -blinkTime = 0.15 #150ms -drowsyTime = 1.5 #1200ms ALARM_ON = False GAMMA = 1.5 threadStatusQ = queue.Queue() -# Phase 2: Session tracking variables (temporary until SessionManager is ready) +# Drunk detection variables +head_positions = deque(maxlen=HEAD_STABILITY_WINDOW) +blink_durations = deque(maxlen=10) +drunk_indicators = { + 'head_sway': False, + 'delayed_blink': False, + 'red_eyes': False, + 'droopy_eyelids': False +} +drunk_alert_sent = False + +# Session tracking variables current_ear = 0.0 session_ear_values = [] session_alerts = 0 session_start_time = None session_active = False -# Temporary Session Tracker - will be replaced by real SessionManager -class TempSessionTracker: +# Load emergency configuration +def load_emergency_config(): + """Load emergency contacts and email configuration""" + try: + with open("emergency_config.json", 'r') as f: + config = json.load(f) + return config["emergency_contacts"], config["email_settings"] + except Exception as e: + print(f"Warning: Could not load emergency config: {e}") + return ["emergency@example.com"], { + "sender_email": "your_email@gmail.com", + "sender_password": "your_app_password" + } + +emergency_contacts, email_settings = load_emergency_config() + +# Gamma correction setup +invGamma = 1.0/GAMMA +table = np.array([((i / 255.0) ** invGamma) * 255 for i in range(0, 256)]).astype("uint8") + +class SessionTracker: + """Enhanced session tracking with both drowsiness and drunk detection metrics""" + def __init__(self): global session_start_time, session_active session_start_time = datetime.now() 
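# --- Illustrative sketch, not part of the patch ---
# load_emergency_config() above reads an emergency_config.json with exactly the two keys
# shown here; the addresses and password below are placeholders, not working credentials.
import json

example_config = {
    "emergency_contacts": ["emergency@example.com"],
    "email_settings": {
        "sender_email": "your_email@gmail.com",
        "sender_password": "your_app_password",
    },
}

with open("emergency_config.json", "w") as f:
    json.dump(example_config, f, indent=2)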
session_active = True - print(f"Session started at: {session_start_time}") + self.drunk_alerts = 0 + self.impairment_events = [] + self.total_frames = 0 + print(f"๐Ÿ“Š Enhanced Detection Session started at: {session_start_time.strftime('%H:%M:%S')}") def add_ear_value(self, ear_value): global session_ear_values, current_ear current_ear = ear_value + self.total_frames += 1 timestamp = datetime.now() session_ear_values.append({ "value": round(ear_value, 4), "timestamp": timestamp.isoformat() }) - def add_alert(self): + def add_drowsy_alert(self): global session_alerts session_alerts += 1 timestamp = datetime.now() - print(f"Alert #{session_alerts} triggered at: {timestamp}") + print(f"๐Ÿ˜ด Drowsiness Alert #{session_alerts} at: {timestamp.strftime('%H:%M:%S')}") + + def add_drunk_alert(self, indicators): + self.drunk_alerts += 1 + timestamp = datetime.now() + event = { + "timestamp": timestamp.isoformat(), + "indicators": indicators.copy(), + "alert_number": self.drunk_alerts + } + self.impairment_events.append(event) + print(f"๐Ÿšจ Impairment Alert #{self.drunk_alerts} at: {timestamp.strftime('%H:%M:%S')}") + print(f" Active indicators: {[k for k, v in indicators.items() if v]}") + + def get_session_stats(self): + if not session_ear_values: + return {} + + ear_values = [item["value"] for item in session_ear_values] + return { + "avg_ear": np.mean(ear_values), + "min_ear": np.min(ear_values), + "max_ear": np.max(ear_values), + "total_frames": self.total_frames, + "drowsy_alerts": session_alerts, + "impairment_alerts": self.drunk_alerts, + "total_blinks": blinkCount + } def end_session(self): global session_start_time, session_active if session_active: end_time = datetime.now() - duration = (end_time - session_start_time).total_seconds() / 60 - avg_ear = sum(item["value"] for item in session_ear_values) / len(session_ear_values) if session_ear_values else 0 + duration_minutes = (end_time - session_start_time).total_seconds() / 60 + stats = self.get_session_stats() - print(f"\n=== Session Summary ===") - print(f"Duration: {duration:.2f} minutes") - print(f"Total EAR readings: {len(session_ear_values)}") - print(f"Average EAR: {avg_ear:.4f}") - print(f"Alerts triggered: {session_alerts}") - print(f"Total blinks: {blinkCount}") + print("\n" + "="*60) + print("๐Ÿ ENHANCED DETECTION SESSION SUMMARY") + print("="*60) + print(f"โฑ๏ธ Session Duration: {duration_minutes:.2f} minutes") + print(f"๐Ÿ“Š Total Frames Processed: {stats.get('total_frames', 0):,}") + print(f"๐Ÿ‘๏ธ Average EAR: {stats.get('avg_ear', 0):.4f}") + print(f"๐Ÿ‘๏ธ EAR Range: {stats.get('min_ear', 0):.4f} - {stats.get('max_ear', 0):.4f}") + print(f"๐Ÿ˜ด Drowsiness Alerts: {stats.get('drowsy_alerts', 0)}") + print(f"๐Ÿšจ Impairment Alerts: {stats.get('impairment_alerts', 0)}") + print(f"๐Ÿ‘€ Total Blinks Detected: {stats.get('total_blinks', 0)}") + if self.impairment_events: + print(f"๐Ÿ” Impairment Event Details:") + for event in self.impairment_events: + indicators = [k for k, v in event['indicators'].items() if v] + timestamp = datetime.fromisoformat(event['timestamp']).strftime('%H:%M:%S') + print(f" โ€ข Alert #{event['alert_number']} at {timestamp}: {', '.join(indicators)}") + + print("="*60) session_active = False -# Initialize session tracker -session_tracker = None - -invGamma = 1.0/GAMMA -table = np.array([((i / 255.0) ** invGamma) * 255 for i in range(0, 256)]).astype("uint8") - def gamma_correction(image): + """Apply gamma correction to improve image contrast""" return cv2.LUT(image, table) def 
histogram_equalization(image): + """Apply histogram equalization for better lighting conditions""" gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - return cv2.equalizeHist(gray) + return cv2.equalizeHist(gray) def soundAlert(path, threadStatusQ): + """Play sound alert in separate thread""" import traceback while True: if not threadStatusQ.empty(): @@ -112,88 +203,305 @@ def soundAlert(path, threadStatusQ): try: playsound.playsound(path) except Exception as e: - print(f"Error playing sound: {e}") - traceback.print_exc() + print(f"๐Ÿ”Š Audio alert error: {e}") + # Try alternative audio playback methods + try: + import os + if os.name == 'nt': # Windows + import winsound + winsound.PlaySound(path, winsound.SND_FILENAME) + else: # Linux/Mac + os.system(f"aplay {path} 2>/dev/null || afplay {path} 2>/dev/null") + except: + print("โš ๏ธ Could not play audio alert") break def eye_aspect_ratio(eye): + """Calculate Eye Aspect Ratio (EAR)""" A = dist.euclidean(eye[1], eye[5]) B = dist.euclidean(eye[2], eye[4]) C = dist.euclidean(eye[0], eye[3]) ear = (A + B) / (2.0 * C) - return ear +def get_head_pose(landmarks, img_shape): + """Calculate head pose angles for drunk detection""" + # 3D model points + model_points = np.array([ + (0.0, 0.0, 0.0), # Nose tip + (0.0, -330.0, -65.0), # Chin + (-225.0, 170.0, -135.0), # Left eye left corner + (225.0, 170.0, -135.0), # Right eye right corner + (-150.0, -150.0, -125.0), # Left Mouth corner + (150.0, -150.0, -125.0) # Right mouth corner + ]) + + # 2D image points from landmarks + image_points = np.array([ + landmarks[30], # Nose tip + landmarks[8], # Chin + landmarks[36], # Left eye left corner + landmarks[45], # Right eye right corner + landmarks[48], # Left mouth corner + landmarks[54] # Right mouth corner + ], dtype=np.float64) + + # Camera internals + focal_length = img_shape[1] + center = (img_shape[1]/2, img_shape[0]/2) + camera_matrix = np.array([ + [focal_length, 0, center[0]], + [0, focal_length, center[1]], + [0, 0, 1] + ], dtype=np.float64) + + dist_coeffs = np.zeros((4, 1)) + + try: + success, rotation_vector, translation_vector = cv2.solvePnP( + model_points, image_points, camera_matrix, dist_coeffs + ) + + if success: + rotation_matrix, _ = cv2.Rodrigues(rotation_vector) + angles = rotation_matrix_to_euler_angles(rotation_matrix) + return angles + except Exception as e: + pass + + return [0, 0, 0] + +def rotation_matrix_to_euler_angles(R): + """Convert rotation matrix to euler angles""" + sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0]) + singular = sy < 1e-6 + + if not singular: + x = math.atan2(R[2,1], R[2,2]) + y = math.atan2(-R[2,0], sy) + z = math.atan2(R[1,0], R[0,0]) + else: + x = math.atan2(-R[1,2], R[1,1]) + y = math.atan2(-R[2,0], sy) + z = 0 + + return [math.degrees(x), math.degrees(y), math.degrees(z)] + +def detect_head_sway(angles): + """Detect excessive head movement indicating impairment""" + head_positions.append(angles) + + if len(head_positions) < HEAD_STABILITY_WINDOW: + return False + + positions_array = np.array(head_positions) + variance = np.var(positions_array, axis=0) + + # Check for excessive movement in any direction + excessive_movement = any(var > HEAD_MOVEMENT_THRESHOLD**2 for var in variance) + return excessive_movement + +def analyze_eye_redness(eye_region): + """Analyze eye region for redness indicating impairment""" + if eye_region.size == 0 or eye_region.shape[0] < 5 or eye_region.shape[1] < 5: + return False + + try: + hsv = cv2.cvtColor(eye_region, cv2.COLOR_BGR2HSV) + + # Define red color ranges in HSV + 
lower_red1 = np.array([0, 50, 50]) + upper_red1 = np.array([10, 255, 255]) + lower_red2 = np.array([170, 50, 50]) + upper_red2 = np.array([180, 255, 255]) + + mask1 = cv2.inRange(hsv, lower_red1, upper_red1) + mask2 = cv2.inRange(hsv, lower_red2, upper_red2) + red_mask = mask1 + mask2 + + red_pixels = cv2.countNonZero(red_mask) + total_pixels = eye_region.shape[0] * eye_region.shape[1] + red_ratio = red_pixels / total_pixels if total_pixels > 0 else 0 + + return red_ratio > RED_EYE_THRESHOLD + except Exception as e: + return False + +def detect_delayed_blink(ear, current_time): + """Detect delayed blinking patterns""" + if not hasattr(detect_delayed_blink, 'blink_start_time'): + detect_delayed_blink.blink_start_time = None + + if ear < thresh: + if detect_delayed_blink.blink_start_time is None: + detect_delayed_blink.blink_start_time = current_time + else: + if detect_delayed_blink.blink_start_time is not None: + duration = current_time - detect_delayed_blink.blink_start_time + blink_durations.append(duration) + detect_delayed_blink.blink_start_time = None + + if len(blink_durations) >= 3: + avg_duration = np.mean(list(blink_durations)) + return avg_duration > BLINK_DELAY_THRESHOLD + + return False + +def detect_droopy_eyelids(landmarks): + """Detect droopy eyelids indicating impairment""" + left_eye_landmarks = [landmarks[i] for i in leftEyeIndex] + right_eye_landmarks = [landmarks[i] for i in rightEyeIndex] + + left_ear = eye_aspect_ratio(left_eye_landmarks) + right_ear = eye_aspect_ratio(right_eye_landmarks) + avg_ear = (left_ear + right_ear) / 2.0 + + # Eyes partially closed but not blinking (between normal open and closed) + return 0.15 < avg_ear < 0.22 + +def send_emergency_alert(alert_type="IMPAIRMENT", severity="HIGH"): + """Send emergency email alert for severe impairment""" + global drunk_alert_sent + + if drunk_alert_sent: + return + + def send_email(): + global drunk_alert_sent + try: + sender_email = email_settings["sender_email"] + password = email_settings["sender_password"] + + message = MIMEMultipart() + message["From"] = sender_email + message["Subject"] = f"๐Ÿšจ URGENT: Driver {alert_type} Alert - {severity} Priority" + + # Get current session stats + stats = session_tracker.get_session_stats() if 'session_tracker' in globals() else {} + + body = f""" +๐Ÿšจ EMERGENCY ALERT: Driver Impairment Detected ๐Ÿšจ + +โฐ Alert Time: {time.strftime('%Y-%m-%d %H:%M:%S')} +๐Ÿ” Alert Type: {alert_type} +โš ๏ธ Severity Level: {severity} + +๐Ÿ“Š Current Detection Status: +{'='*50} +๐ŸŽฏ Active Impairment Indicators: +โ€ข Head Sway/Movement: {'โœ… DETECTED' if drunk_indicators['head_sway'] else 'โŒ Normal'} +โ€ข Delayed Blinking: {'โœ… DETECTED' if drunk_indicators['delayed_blink'] else 'โŒ Normal'} +โ€ข Eye Redness: {'โœ… DETECTED' if drunk_indicators['red_eyes'] else 'โŒ Normal'} +โ€ข Droopy Eyelids: {'โœ… DETECTED' if drunk_indicators['droopy_eyelids'] else 'โŒ Normal'} +โ€ข Drowsiness State: {'โœ… ACTIVE' if drowsy else 'โŒ Alert'} + +๐Ÿ“ˆ Session Statistics: +โ€ข Current EAR Value: {current_ear:.4f} +โ€ข Total Drowsiness Alerts: {stats.get('drowsy_alerts', 0)} +โ€ข Total Impairment Alerts: {stats.get('impairment_alerts', 0)} +โ€ข Detected Blinks: {stats.get('total_blinks', 0)} + +โš ๏ธ IMMEDIATE ACTION RECOMMENDED โš ๏ธ + +This is an automated alert from the Enhanced Driver Detection System. +The system has detected concerning patterns that may indicate driver impairment. 
+ +๐Ÿ”ง System Information: +โ€ข Detection Algorithm: Enhanced Multi-Modal Analysis +โ€ข Confidence Level: High +โ€ข Monitoring Duration: Continuous + +๐Ÿ“ž Emergency Response Protocol: +1. Attempt to contact the driver immediately +2. If no response, consider emergency services +3. Check driver location and status + +This message was generated automatically by the driver safety monitoring system. + """ + + message.attach(MIMEText(body, "plain")) + + # Send email to all emergency contacts + with smtplib.SMTP("smtp.gmail.com", 587) as server: + server.starttls() + server.login(sender_email, password) + for contact in emergency_contacts: + message["To"] = contact + text = message.as_string() + server.sendmail(sender_email, contact, text) + del message["To"] + + print(f"๐Ÿ“ง Emergency alert sent to {len(emergency_contacts)} contact(s)") + drunk_alert_sent = True + + except Exception as e: + print(f"โŒ Failed to send emergency alert: {e}") + print(" Please check email configuration in emergency_config.json") + + # Send in separate thread to avoid blocking detection + alert_thread = Thread(target=send_email) + alert_thread.daemon = True + alert_thread.start() -def checkEyeStatus(landmarks): +def checkEyeStatus(landmarks, frame): + """Check eye status and calculate EAR""" global session_tracker, current_ear - mask = np.zeros(frame.shape[:2], dtype = np.float32) + + mask = np.zeros(frame.shape[:2], dtype=np.float32) hullLeftEye = [] for i in range(0, len(leftEyeIndex)): hullLeftEye.append((landmarks[leftEyeIndex[i]][0], landmarks[leftEyeIndex[i]][1])) - cv2.fillConvexPoly(mask, np.int32(hullLeftEye), 255) hullRightEye = [] for i in range(0, len(rightEyeIndex)): hullRightEye.append((landmarks[rightEyeIndex[i]][0], landmarks[rightEyeIndex[i]][1])) - - cv2.fillConvexPoly(mask, np.int32(hullRightEye), 255) leftEAR = eye_aspect_ratio(hullLeftEye) rightEAR = eye_aspect_ratio(hullRightEye) - ear = (leftEAR + rightEAR) / 2.0 - if session_tracker: + if 'session_tracker' in globals() and session_tracker: session_tracker.add_ear_value(ear) - eyeStatus = 1 # 1 = Open, 0 = closed - if (ear < thresh): - eyeStatus = 0 - - return eyeStatus + eyeStatus = 1 if ear >= thresh else 0 + return eyeStatus, ear def checkBlinkStatus(eyeStatus): - global state, blinkCount, drowsy, session_tracker - if(state >= 0 and state <= falseBlinkLimit): - if(eyeStatus): + """Check blink status and drowsiness""" + global state, blinkCount, drowsy, session_tracker, falseBlinkLimit, drowsyLimit + + if state >= 0 and state <= falseBlinkLimit: + if eyeStatus: state = 0 - else: state += 1 - - elif(state >= falseBlinkLimit and state < drowsyLimit): - if(eyeStatus): + elif state >= falseBlinkLimit and state < drowsyLimit: + if eyeStatus: blinkCount += 1 state = 0 - else: state += 1 - - else: - if(eyeStatus): + if eyeStatus: state = 0 drowsy = 3 blinkCount += 1 - # Phase 2: Track alert when drowsiness is detected - if session_tracker: - session_tracker.add_alert() - + if 'session_tracker' in globals() and session_tracker: + session_tracker.add_drowsy_alert() else: drowsy = 3 - # Phase 2: Track alert when drowsiness persists - if session_tracker: - session_tracker.add_alert() + if 'session_tracker' in globals() and session_tracker: + session_tracker.add_drowsy_alert() def getLandmarks(im): + """Extract facial landmarks""" imSmall = cv2.resize(im, None, - fx = 1.0/FACE_DOWNSAMPLE_RATIO, - fy = 1.0/FACE_DOWNSAMPLE_RATIO, - interpolation = cv2.INTER_LINEAR) + fx=1.0/FACE_DOWNSAMPLE_RATIO, + fy=1.0/FACE_DOWNSAMPLE_RATIO, + 
interpolation=cv2.INTER_LINEAR) rects = detector(imSmall, 0) if len(rects) == 0: @@ -208,172 +516,378 @@ def getLandmarks(im): [points.append((p.x, p.y)) for p in predictor(im, newRect).parts()] return points -# Phase 2: Getter functions for external access (for session_history.py) -def get_current_ear(): - """Get the current EAR value""" - return current_ear - -def get_current_blink_count(): - """Get the current blink count""" - return blinkCount - -def get_session_data(): - """Get all current session data""" - return { - 'ear': current_ear, - 'blink_count': blinkCount, - 'alerts': session_alerts, - 'drowsy_state': drowsy, - 'eye_state': state, - 'session_active': session_active - } - -def get_session_ear_values(): - """Get all EAR values collected in current session""" - return session_ear_values - -def start_new_session(): - """Start a new tracking session""" - global session_tracker - if session_tracker: - session_tracker.end_session() - session_tracker = TempSessionTracker() - -def end_current_session(): - """End the current tracking session""" - global session_tracker - if session_tracker: - session_tracker.end_session() - session_tracker = None - -capture = cv2.VideoCapture(0) - -for i in range(10): - ret, frame = capture.read() - if not capture.isOpened(): - print("Error: Could not open webcam.") - sys.exit() - -totalTime = 0.0 -validFrames = 0 -dummyFrames = 100 - -print("Caliberation in Progress!") -while(validFrames < dummyFrames): - validFrames += 1 - t = time.time() - ret, frame = capture.read() - if not ret or frame is None: - print("Error: Could not read frame from webcam.") - break - - height, width = frame.shape[:2] - IMAGE_RESIZE = np.float32(height)/RESIZE_HEIGHT - frame = cv2.resize(frame, None, - fx = 1/IMAGE_RESIZE, - fy = 1/IMAGE_RESIZE, - interpolation = cv2.INTER_LINEAR) - - #adjusted = gamma_correction(frame) - adjusted = histogram_equalization(frame) - - landmarks = getLandmarks(adjusted) - timeLandmarks = time.time() - t - - if landmarks == 0: - validFrames -= 1 - cv2.putText(frame, "Unable to detect face, Please check proper lighting", (10, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1, cv2.LINE_AA) - cv2.putText(frame, "or decrease FACE_DOWNSAMPLE_RATIO", (10, 50), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1, cv2.LINE_AA) - cv2.imshow("Blink Detection Demo", frame) - if cv2.waitKey(1) & 0xFF == 27: - break +def analyze_drunk_indicators(landmarks, frame, ear, current_time): + """Analyze all drunk driver indicators""" + global drunk_indicators, session_tracker + + # Head pose analysis + head_angles = get_head_pose(landmarks, frame.shape) + drunk_indicators['head_sway'] = detect_head_sway(head_angles) + + # Delayed blink detection + drunk_indicators['delayed_blink'] = detect_delayed_blink(ear, current_time) + + # Droopy eyelids detection + drunk_indicators['droopy_eyelids'] = detect_droopy_eyelids(landmarks) + + # Eye redness analysis with improved error handling + try: + left_eye_points = np.array([landmarks[i] for i in leftEyeIndex]) + right_eye_points = np.array([landmarks[i] for i in rightEyeIndex]) + + left_rect = cv2.boundingRect(left_eye_points) + right_rect = cv2.boundingRect(right_eye_points) + + padding = 8 + left_eye_region = frame[ + max(0, left_rect[1]-padding):left_rect[1]+left_rect[3]+padding, + max(0, left_rect[0]-padding):left_rect[0]+left_rect[2]+padding + ] + right_eye_region = frame[ + max(0, right_rect[1]-padding):right_rect[1]+right_rect[3]+padding, + max(0, right_rect[0]-padding):right_rect[0]+right_rect[2]+padding + ] + + left_red = 
analyze_eye_redness(left_eye_region) + right_red = analyze_eye_redness(right_eye_region) + drunk_indicators['red_eyes'] = left_red or right_red + + except Exception as e: + drunk_indicators['red_eyes'] = False + + # Track impairment alerts + active_indicators = sum(drunk_indicators.values()) + if active_indicators > 0 and 'session_tracker' in globals() and session_tracker: + session_tracker.add_drunk_alert(drunk_indicators) + + return head_angles +def draw_drunk_indicators(frame, head_angles, ear): + """Draw drunk detection indicators and enhanced UI on frame""" + + # Background overlay for better text visibility + overlay = frame.copy() + cv2.rectangle(overlay, (0, 0), (frame.shape[1], 200), (0, 0, 0), -1) + frame = cv2.addWeighted(frame, 0.7, overlay, 0.3, 0) + + # Main EAR display + ear_color = (0, 255, 0) if ear > thresh else (0, 0, 255) + cv2.putText(frame, f"EAR: {ear:.3f}", (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, 0.8, ear_color, 2) + + # Blink counter + cv2.putText(frame, f"Blinks: {blinkCount}", (10, 60), + cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2) + + # Head pose information + cv2.putText(frame, f"Head: P{head_angles[0]:.1f}ยฐ Y{head_angles[1]:.1f}ยฐ R{head_angles[2]:.1f}ยฐ", + (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (200, 200, 200), 2) + + # Impairment indicators + active_indicators = sum(drunk_indicators.values()) + + if active_indicators > 0: + # Main indicator count + indicator_color = (0, 165, 255) if active_indicators < 3 else (0, 0, 255) + cv2.putText(frame, f"IMPAIRMENT INDICATORS: {active_indicators}/4", + (10, 125), cv2.FONT_HERSHEY_SIMPLEX, 0.8, indicator_color, 2) + + # Individual indicators + y_offset = 155 + indicator_names = { + 'head_sway': 'Head Movement', + 'delayed_blink': 'Slow Blinking', + 'red_eyes': 'Eye Redness', + 'droopy_eyelids': 'Droopy Eyes' + } + + for key, name in indicator_names.items(): + if drunk_indicators[key]: + cv2.putText(frame, f"โ€ข {name}", (20, y_offset), + cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 165, 255), 2) + y_offset += 25 + + # Session info (bottom right) + if 'session_tracker' in globals() and session_tracker: + session_duration = (datetime.now() - session_start_time).total_seconds() / 60 + cv2.putText(frame, f"Session: {session_duration:.1f}min", + (frame.shape[1] - 180, frame.shape[0] - 60), + cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2) + cv2.putText(frame, f"Alerts: D{session_alerts} I{session_tracker.drunk_alerts}", + (frame.shape[1] - 180, frame.shape[0] - 30), + cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2) + +def main(): + """Main detection loop with enhanced error handling and features""" + global session_tracker, drowsyLimit, falseBlinkLimit, frame, drowsy, ALARM_ON + + print("๐Ÿš€ Starting Enhanced Driver Detection System...") + print("๐Ÿ”ง Initializing camera and calibration...") + + # Initialize camera with better error handling + for camera_index in range(3): # Try multiple camera indices + capture = cv2.VideoCapture(camera_index) + if capture.isOpened(): + print(f"๐Ÿ“น Camera {camera_index} initialized successfully") + break + capture.release() else: - totalTime += timeLandmarks -print("Caliberation Complete!") - -spf = totalTime/dummyFrames -print("Current SPF (seconds per frame) is {:.2f} ms".format(spf * 1000)) - -drowsyLimit = drowsyTime/spf -falseBlinkLimit = blinkTime/spf -print("drowsy limit: {}, false blink limit: {}".format(drowsyLimit, falseBlinkLimit)) + print("โŒ Error: Could not access any camera") + input("Press Enter to exit...") + sys.exit(1) + + # Test camera capture + for i in 
range(10): + ret, frame = capture.read() + if ret and frame is not None: + break + time.sleep(0.1) + else: + print("โŒ Error: Could not read frames from camera") + capture.release() + sys.exit(1) -# Phase 2: Start session tracking -session_tracker = TempSessionTracker() + # Calibration phase with progress indication + totalTime = 0.0 + validFrames = 0 + dummyFrames = 100 -if __name__ == "__main__": - vid_writer = cv2.VideoWriter('output-low-light-2.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 15, (frame.shape[1],frame.shape[0])) - while(1): - try: - t = time.time() + print(f"๐ŸŽฏ Calibrating detection system ({dummyFrames} frames)...") + + while validFrames < dummyFrames: + validFrames += 1 + t = time.time() + ret, frame = capture.read() + + if not ret or frame is None: + print("โš ๏ธ Frame read error during calibration") + validFrames -= 1 + continue + + height, width = frame.shape[:2] + IMAGE_RESIZE = np.float32(height)/RESIZE_HEIGHT + frame = cv2.resize(frame, None, + fx=1/IMAGE_RESIZE, + fy=1/IMAGE_RESIZE, + interpolation=cv2.INTER_LINEAR) + + adjusted = histogram_equalization(frame) + landmarks = getLandmarks(adjusted) + timeLandmarks = time.time() - t + + if landmarks == 0: + validFrames -= 1 + # Show calibration progress + progress = f"Calibration Progress: {validFrames}/{dummyFrames}" + cv2.putText(frame, progress, (10, 30), + cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 0), 2) + cv2.putText(frame, "Position your face clearly in camera view", + (10, 70), cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 255), 2) + cv2.putText(frame, "Ensure good lighting conditions", + (10, 100), cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 255), 2) + cv2.imshow("Enhanced Detection System - Calibration", frame) + if cv2.waitKey(1) & 0xFF == 27: # ESC key + capture.release() + cv2.destroyAllWindows() + sys.exit(0) + else: + totalTime += timeLandmarks + # Show successful calibration frame + progress = f"Calibration: {validFrames}/{dummyFrames}" + cv2.putText(frame, progress, (10, 30), + cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 0), 2) + cv2.putText(frame, "Face detected successfully!", + (10, 70), cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 0), 2) + cv2.imshow("Enhanced Detection System - Calibration", frame) + cv2.waitKey(1) + + print("โœ… Calibration completed successfully!") + + spf = totalTime/dummyFrames + print(f"๐Ÿ“Š Performance: {spf * 1000:.2f} ms per frame") + + drowsyLimit = int(drowsyTime/spf) + falseBlinkLimit = int(blinkTime/spf) + print(f"๐ŸŽฏ Detection thresholds - Drowsy: {drowsyLimit}, Blink: {falseBlinkLimit}") + + # Initialize session tracking + session_tracker = SessionTracker() + + # Initialize video writer + fourcc = cv2.VideoWriter_fourcc(*'MJPG') + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + vid_writer = cv2.VideoWriter(f'enhanced_detection_session_{timestamp}.avi', + fourcc, 15, (frame.shape[1], frame.shape[0])) + + print("\n" + "="*60) + print("๐Ÿš€ ENHANCED DRIVER DETECTION SYSTEM - ACTIVE") + print("="*60) + print("๐ŸŽฏ Features Active:") + print(" โ€ข Real-time drowsiness detection") + print(" โ€ข Advanced impairment analysis") + print(" โ€ข Emergency alert system") + print(" โ€ข Session tracking & analytics") + print("\nโŒจ๏ธ Controls:") + print(" โ€ข 'q' - Quit system") + print(" โ€ข 'r' - Reset all alerts") + print(" โ€ข ESC - Emergency exit") + print("="*60 + "\n") + + frame_count = 0 + last_stats_time = time.time() + + # Main detection loop + try: + while True: + current_time = time.time() ret, frame = capture.read() + + if not ret or frame is None: + print("โš ๏ธ Camera 
disconnected or frame read error") + break + + frame_count += 1 + + # Resize frame for processing height, width = frame.shape[:2] IMAGE_RESIZE = np.float32(height)/RESIZE_HEIGHT frame = cv2.resize(frame, None, - fx = 1/IMAGE_RESIZE, - fy = 1/IMAGE_RESIZE, - interpolation = cv2.INTER_LINEAR) + fx=1/IMAGE_RESIZE, + fy=1/IMAGE_RESIZE, + interpolation=cv2.INTER_LINEAR) - # adjusted = gamma_correction(frame) adjusted = histogram_equalization(frame) - landmarks = getLandmarks(adjusted) + if landmarks == 0: - validFrames -= 1 - cv2.putText(frame, "Unable to detect face, Please check proper lighting", (10, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1, cv2.LINE_AA) - cv2.putText(frame, "or decrease FACE_DOWNSAMPLE_RATIO", (10, 50), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1, cv2.LINE_AA) - cv2.imshow("Blink Detection Demo", frame) - if cv2.waitKey(1) & 0xFF == 27: + cv2.putText(frame, "โš ๏ธ No face detected - Please check lighting and position", + (10, 50), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2) + cv2.putText(frame, "System Status: Waiting for face detection", + (10, 80), cv2.FONT_HERSHEY_COMPLEX, 0.6, (255, 255, 0), 2) + cv2.imshow("Enhanced Detection System", frame) + if cv2.waitKey(1) & 0xFF == ord('q'): break continue - eyeStatus = checkEyeStatus(landmarks) + # Core drowsiness detection + eyeStatus, ear = checkEyeStatus(landmarks, frame) checkBlinkStatus(eyeStatus) - for i in range(0, len(leftEyeIndex)): - cv2.circle(frame, (landmarks[leftEyeIndex[i]][0], landmarks[leftEyeIndex[i]][1]), 1, (0, 0, 255), -1, lineType=cv2.LINE_AA) - - for i in range(0, len(rightEyeIndex)): - cv2.circle(frame, (landmarks[rightEyeIndex[i]][0], landmarks[rightEyeIndex[i]][1]), 1, (0, 0, 255), -1, lineType=cv2.LINE_AA) - - if drowsy: - cv2.putText(frame, "! ! ! DROWSINESS ALERT ! ! 
!", (70, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA) + # Advanced impairment detection + head_angles = analyze_drunk_indicators(landmarks, frame, ear, current_time) + + # Draw eye landmarks + for i in leftEyeIndex + rightEyeIndex: + cv2.circle(frame, (landmarks[i][0], landmarks[i][1]), + 2, (0, 255, 0), -1, lineType=cv2.LINE_AA) + + # Enhanced UI display + draw_drunk_indicators(frame, head_angles, ear) + + # Drowsiness alert handling + if drowsy > 0: + drowsy -= 1 + cv2.putText(frame, "๐Ÿšจ DROWSINESS ALERT ๐Ÿšจ", (50, frame.shape[0] - 100), + cv2.FONT_HERSHEY_COMPLEX, 1.2, (0, 0, 255), 3, cv2.LINE_AA) + cv2.putText(frame, "Driver appears to be falling asleep!", (50, frame.shape[0] - 60), + cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 255), 2, cv2.LINE_AA) + if not ALARM_ON: ALARM_ON = True threadStatusQ.put(not ALARM_ON) thread = Thread(target=soundAlert, args=(sound_path, threadStatusQ,)) thread.setDaemon(True) thread.start() - else: - cv2.putText(frame, "Blinks : {}".format(blinkCount), (460, 80), cv2.FONT_HERSHEY_COMPLEX, 0.8, (0,0,255), 2, cv2.LINE_AA) - # (0, 400) - ALARM_ON = False + if ALARM_ON: + ALARM_ON = False + threadStatusQ.put(True) # Stop alarm - - cv2.imshow("Blink Detection", frame) + # Emergency alert conditions + drunk_indicators_count = sum(drunk_indicators.values()) + severe_impairment = (drunk_indicators_count >= 2 and drowsy > 0) or drunk_indicators_count >= 3 + + if severe_impairment and not drunk_alert_sent: + severity = "CRITICAL" if drunk_indicators_count >= 3 and drowsy > 0 else "HIGH" + send_emergency_alert("SEVERE IMPAIRMENT", severity) + + # Visual emergency alert + cv2.rectangle(frame, (0, frame.shape[0] - 150), (frame.shape[1], frame.shape[0]), (0, 0, 255), -1) + cv2.putText(frame, "๐Ÿ†˜ EMERGENCY ALERT SENT ๐Ÿ†˜", (50, frame.shape[0] - 100), + cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 3, cv2.LINE_AA) + cv2.putText(frame, "Severe impairment detected - Contacts notified", (50, frame.shape[0] - 60), + cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA) + + # Show main detection window + cv2.imshow("Enhanced Detection System", frame) vid_writer.write(frame) - k = cv2.waitKey(1) - if k == ord('r'): + # Print periodic statistics + if current_time - last_stats_time > 30: # Every 30 seconds + if session_tracker: + stats = session_tracker.get_session_stats() + print(f"๐Ÿ“Š Session Update - EAR: {current_ear:.3f}, " + f"Blinks: {stats.get('total_blinks', 0)}, " + f"Alerts: D{stats.get('drowsy_alerts', 0)}/I{stats.get('impairment_alerts', 0)}") + last_stats_time = current_time + + # Handle keyboard input + key = cv2.waitKey(1) & 0xFF + if key == ord('q'): + print("๐Ÿ›‘ Quit command received") + break + elif key == ord('r'): + # Reset all detection states + print("๐Ÿ”„ Resetting all alerts and detection states...") state = 0 drowsy = 0 ALARM_ON = False - threadStatusQ.put(not ALARM_ON) - - elif k == ord('q'): + drunk_alert_sent = False + drunk_indicators = {k: False for k in drunk_indicators} + head_positions.clear() + blink_durations.clear() + threadStatusQ.put(True) # Stop any active alarms + print("โœ… All alerts reset successfully") + elif key == 27: # ESC key + print("๐Ÿšจ Emergency exit") break - # print("Time taken", time.time() - t) + except KeyboardInterrupt: + print("\n๐Ÿ›‘ Interrupted by user (Ctrl+C)") + except Exception as e: + print(f"โŒ Unexpected error in main loop: {e}") + import traceback + traceback.print_exc() + + finally: + # Cleanup and session summary + print("\n๐Ÿ”„ Shutting down Enhanced Detection System...") + + 
# Stop any active alarms + if ALARM_ON: + threadStatusQ.put(True) + + # End session and show summary + if session_tracker: + session_tracker.end_session() + # Release resources + try: + capture.release() + vid_writer.release() + cv2.destroyAllWindows() + print("โœ… Resources released successfully") except Exception as e: - print(e) - - # Phase 2: End session when detection stops - if session_tracker: - session_tracker.end_session() + print(f"โš ๏ธ Error during cleanup: {e}") + + print("๐Ÿ‘‹ Enhanced Detection System shutdown complete") + print("Thank you for using the driver safety system!") - capture.release() - vid_writer.release() - cv2.destroyAllWindows() +if __name__ == "__main__": + try: + main() + except Exception as e: + print(f"โŒ Critical error: {e}") + import traceback + traceback.print_exc() + input("Press Enter to exit...") \ No newline at end of file diff --git a/enhanced_driver_detection.py b/enhanced_driver_detection.py new file mode 100644 index 0000000..89381cf --- /dev/null +++ b/enhanced_driver_detection.py @@ -0,0 +1,402 @@ +#!/usr/bin/env python3 +""" +Enhanced Driver Impairment Detection System +Combines drowsiness detection with drunk driver detection features +""" + +import cv2 +import dlib +import numpy as np +from scipy.spatial import distance as dist +import time +import threading +import queue +import math +from collections import deque +import smtplib +from email.mime.text import MIMEText +from email.mime.multipart import MIMEMultipart + +class ImpairedDriverDetector: + def __init__(self): + # Load face detector and predictor + self.detector = dlib.get_frontal_face_detector() + self.predictor = dlib.shape_predictor("models/shape_predictor_68_face_landmarks.dat") + + # Eye landmarks indices + self.left_eye = [36, 37, 38, 39, 40, 41] + self.right_eye = [42, 43, 44, 45, 46, 47] + + # Drowsiness detection parameters + self.ear_threshold = 0.25 + self.ear_consec_frames = 20 + self.ear_counter = 0 + + # Drunk detection parameters + self.head_movement_threshold = 15 # degrees + self.head_stability_window = 30 # frames + self.blink_delay_threshold = 2.0 # seconds + self.red_eye_threshold = 0.4 # red channel dominance + + # Data storage for analysis + self.head_positions = deque(maxlen=self.head_stability_window) + self.blink_times = deque(maxlen=10) + self.eye_colors = deque(maxlen=20) + + # Alert system + self.alert_active = False + self.emergency_contacts = ["emergency@example.com"] # Add real contacts + + # Status tracking + self.drowsy_detected = False + self.drunk_indicators = { + 'head_sway': False, + 'delayed_blink': False, + 'red_eyes': False, + 'droopy_eyelids': False + } + + def calculate_ear(self, eye_landmarks): + """Calculate Eye Aspect Ratio""" + # Vertical eye landmarks + A = dist.euclidean(eye_landmarks[1], eye_landmarks[5]) + B = dist.euclidean(eye_landmarks[2], eye_landmarks[4]) + # Horizontal eye landmark + C = dist.euclidean(eye_landmarks[0], eye_landmarks[3]) + + ear = (A + B) / (2.0 * C) + return ear + + def get_head_pose(self, landmarks, img_shape): + """Calculate head pose angles""" + # 3D model points + model_points = np.array([ + (0.0, 0.0, 0.0), # Nose tip + (0.0, -330.0, -65.0), # Chin + (-225.0, 170.0, -135.0), # Left eye left corner + (225.0, 170.0, -135.0), # Right eye right corner + (-150.0, -150.0, -125.0), # Left Mouth corner + (150.0, -150.0, -125.0) # Right mouth corner + ]) + + # 2D image points from landmarks + image_points = np.array([ + landmarks[30], # Nose tip + landmarks[8], # Chin + landmarks[36], # Left eye left corner + 
landmarks[45], # Right eye right corner + landmarks[48], # Left mouth corner + landmarks[54] # Right mouth corner + ], dtype=np.float64) + + # Camera internals + focal_length = img_shape[1] + center = (img_shape[1]/2, img_shape[0]/2) + camera_matrix = np.array([ + [focal_length, 0, center[0]], + [0, focal_length, center[1]], + [0, 0, 1] + ], dtype=np.float64) + + dist_coeffs = np.zeros((4, 1)) + + try: + success, rotation_vector, translation_vector = cv2.solvePnP( + model_points, image_points, camera_matrix, dist_coeffs + ) + + if success: + # Convert rotation vector to angles + rotation_matrix, _ = cv2.Rodrigues(rotation_vector) + angles = self.rotation_matrix_to_euler_angles(rotation_matrix) + return angles + except: + pass + + return [0, 0, 0] # Default values if calculation fails + + def rotation_matrix_to_euler_angles(self, R): + """Convert rotation matrix to euler angles""" + sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0]) + singular = sy < 1e-6 + + if not singular: + x = math.atan2(R[2,1], R[2,2]) + y = math.atan2(-R[2,0], sy) + z = math.atan2(R[1,0], R[0,0]) + else: + x = math.atan2(-R[1,2], R[1,1]) + y = math.atan2(-R[2,0], sy) + z = 0 + + return [math.degrees(x), math.degrees(y), math.degrees(z)] + + def detect_head_sway(self, angles): + """Detect excessive head movement indicating impairment""" + self.head_positions.append(angles) + + if len(self.head_positions) < self.head_stability_window: + return False + + # Calculate movement variance + positions_array = np.array(self.head_positions) + variance = np.var(positions_array, axis=0) + + # Check if movement exceeds threshold + excessive_movement = any(var > self.head_movement_threshold**2 for var in variance) + + return excessive_movement + + def analyze_eye_color(self, eye_region): + """Analyze eye region for redness indicating alcohol consumption""" + if eye_region.size == 0: + return False + + # Convert to different color spaces for better analysis + hsv = cv2.cvtColor(eye_region, cv2.COLOR_BGR2HSV) + + # Define red color range in HSV + lower_red1 = np.array([0, 50, 50]) + upper_red1 = np.array([10, 255, 255]) + lower_red2 = np.array([170, 50, 50]) + upper_red2 = np.array([180, 255, 255]) + + # Create masks for red areas + mask1 = cv2.inRange(hsv, lower_red1, upper_red1) + mask2 = cv2.inRange(hsv, lower_red2, upper_red2) + red_mask = mask1 + mask2 + + # Calculate red pixel percentage + red_pixels = cv2.countNonZero(red_mask) + total_pixels = eye_region.shape[0] * eye_region.shape[1] + red_ratio = red_pixels / total_pixels if total_pixels > 0 else 0 + + return red_ratio > self.red_eye_threshold + + def detect_delayed_blink(self, ear, timestamp): + """Detect delayed or slow blinking patterns""" + # Detect blink events (when EAR drops significantly) + if ear < self.ear_threshold: + if not hasattr(self, 'blink_start_time'): + self.blink_start_time = timestamp + else: + if hasattr(self, 'blink_start_time'): + blink_duration = timestamp - self.blink_start_time + self.blink_times.append(blink_duration) + delattr(self, 'blink_start_time') + + # Analyze blink patterns + if len(self.blink_times) >= 3: + avg_blink_duration = np.mean(list(self.blink_times)) + return avg_blink_duration > self.blink_delay_threshold + + return False + + def detect_droopy_eyelids(self, landmarks): + """Detect droopy eyelids indicating impairment""" + # Calculate eyelid openness for both eyes + left_eye_landmarks = [landmarks[i] for i in self.left_eye] + right_eye_landmarks = [landmarks[i] for i in self.right_eye] + + left_ear = 
self.calculate_ear(left_eye_landmarks) + right_ear = self.calculate_ear(right_eye_landmarks) + + avg_ear = (left_ear + right_ear) / 2.0 + + # Check if eyes are consistently partially closed (not fully closed like blinking) + return 0.15 < avg_ear < 0.22 # Between fully closed and normal open + + def send_emergency_alert(self, alert_type, severity): + """Send emergency alert to contacts""" + def send_email(): + try: + # Configure your email settings here + sender_email = "your_email@gmail.com" + password = "your_password" # Use app password for Gmail + + message = MIMEMultipart() + message["From"] = sender_email + message["Subject"] = f"URGENT: Driver Impairment Alert - {alert_type}" + + body = f""" + EMERGENCY ALERT: Driver Impairment Detected + + Alert Type: {alert_type} + Severity: {severity} + Time: {time.strftime('%Y-%m-%d %H:%M:%S')} + + Detected Indicators: + - Head Sway: {'Yes' if self.drunk_indicators['head_sway'] else 'No'} + - Delayed Blinking: {'Yes' if self.drunk_indicators['delayed_blink'] else 'No'} + - Red Eyes: {'Yes' if self.drunk_indicators['red_eyes'] else 'No'} + - Droopy Eyelids: {'Yes' if self.drunk_indicators['droopy_eyelids'] else 'No'} + - Drowsiness: {'Yes' if self.drowsy_detected else 'No'} + + Immediate action may be required. + """ + + message.attach(MIMEText(body, "plain")) + + # Send email to all emergency contacts + with smtplib.SMTP("smtp.gmail.com", 587) as server: + server.starttls() + server.login(sender_email, password) + for contact in self.emergency_contacts: + message["To"] = contact + text = message.as_string() + server.sendmail(sender_email, contact, text) + + print("Emergency alert sent successfully!") + + except Exception as e: + print(f"Failed to send emergency alert: {e}") + + # Send email in separate thread to avoid blocking main process + email_thread = threading.Thread(target=send_email) + email_thread.daemon = True + email_thread.start() + + def process_frame(self, frame): + """Main processing function for each frame""" + gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + faces = self.detector(gray) + + current_time = time.time() + + for face in faces: + landmarks = self.predictor(gray, face) + landmarks = [(p.x, p.y) for p in landmarks.parts()] + + # Extract eye regions + left_eye_landmarks = [landmarks[i] for i in self.left_eye] + right_eye_landmarks = [landmarks[i] for i in self.right_eye] + + # Calculate EAR for drowsiness detection + left_ear = self.calculate_ear(left_eye_landmarks) + right_ear = self.calculate_ear(right_eye_landmarks) + ear = (left_ear + right_ear) / 2.0 + + # Drowsiness detection + if ear < self.ear_threshold: + self.ear_counter += 1 + if self.ear_counter >= self.ear_consec_frames: + self.drowsy_detected = True + else: + self.ear_counter = 0 + self.drowsy_detected = False + + # Get head pose for drunk detection + head_angles = self.get_head_pose(landmarks, frame.shape) + + # Drunk driver detection + self.drunk_indicators['head_sway'] = self.detect_head_sway(head_angles) + self.drunk_indicators['delayed_blink'] = self.detect_delayed_blink(ear, current_time) + self.drunk_indicators['droopy_eyelids'] = self.detect_droopy_eyelids(landmarks) + + # Extract eye regions for color analysis + left_eye_rect = cv2.boundingRect(np.array(left_eye_landmarks)) + right_eye_rect = cv2.boundingRect(np.array(right_eye_landmarks)) + + left_eye_region = frame[left_eye_rect[1]:left_eye_rect[1]+left_eye_rect[3], + left_eye_rect[0]:left_eye_rect[0]+left_eye_rect[2]] + right_eye_region = 
frame[right_eye_rect[1]:right_eye_rect[1]+right_eye_rect[3], + right_eye_rect[0]:right_eye_rect[0]+right_eye_rect[2]] + + # Analyze eye color for redness + left_red = self.analyze_eye_color(left_eye_region) + right_red = self.analyze_eye_color(right_eye_region) + self.drunk_indicators['red_eyes'] = left_red or right_red + + # Draw landmarks and information on frame + self.draw_analysis_results(frame, landmarks, ear, head_angles) + + # Check for alerts + self.check_and_trigger_alerts() + + return frame + + def draw_analysis_results(self, frame, landmarks, ear, head_angles): + """Draw analysis results on the frame""" + # Draw eye landmarks + for i in self.left_eye + self.right_eye: + cv2.circle(frame, landmarks[i], 2, (0, 255, 0), -1) + + # Display EAR + cv2.putText(frame, f"EAR: {ear:.2f}", (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2) + + # Display head angles + cv2.putText(frame, f"Head: P{head_angles[0]:.1f} Y{head_angles[1]:.1f} R{head_angles[2]:.1f}", + (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2) + + # Display alerts + y_offset = 90 + if self.drowsy_detected: + cv2.putText(frame, "DROWSINESS ALERT!", (10, y_offset), + cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3) + y_offset += 40 + + # Display drunk indicators + drunk_count = sum(self.drunk_indicators.values()) + if drunk_count > 0: + cv2.putText(frame, f"IMPAIRMENT INDICATORS: {drunk_count}/4", (10, y_offset), + cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 165, 255), 2) + y_offset += 30 + + for indicator, status in self.drunk_indicators.items(): + if status: + cv2.putText(frame, f"- {indicator.replace('_', ' ').title()}", (20, y_offset), + cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 165, 255), 2) + y_offset += 25 + + def check_and_trigger_alerts(self): + """Check conditions and trigger appropriate alerts""" + drunk_indicators_count = sum(self.drunk_indicators.values()) + + # High severity: Multiple drunk indicators + drowsiness + if drunk_indicators_count >= 2 and self.drowsy_detected: + if not self.alert_active: + self.send_emergency_alert("SEVERE IMPAIRMENT", "HIGH") + self.alert_active = True + + # Medium severity: Multiple drunk indicators OR drowsiness with some indicators + elif drunk_indicators_count >= 3 or (self.drowsy_detected and drunk_indicators_count >= 1): + if not self.alert_active: + self.send_emergency_alert("MODERATE IMPAIRMENT", "MEDIUM") + self.alert_active = True + + # Reset alert flag if conditions improve + elif drunk_indicators_count == 0 and not self.drowsy_detected: + self.alert_active = False + +# Example usage +if __name__ == "__main__": + detector = ImpairedDriverDetector() + cap = cv2.VideoCapture(0) + + print("Enhanced Driver Impairment Detection System Started") + print("Press 'q' to quit, 'r' to reset alerts") + + while True: + ret, frame = cap.read() + if not ret: + break + + # Process frame + processed_frame = detector.process_frame(frame) + + # Display result + cv2.imshow('Enhanced Driver Impairment Detection', processed_frame) + + key = cv2.waitKey(1) & 0xFF + if key == ord('q'): + break + elif key == ord('r'): + # Reset detection states + detector.alert_active = False + detector.drowsy_detected = False + detector.drunk_indicators = {k: False for k in detector.drunk_indicators} + print("Alerts reset") + + cap.release() + cv2.destroyAllWindows() diff --git a/main.py b/main.py index fd4b5a1..b4ece5f 100644 --- a/main.py +++ b/main.py @@ -1,9 +1,12 @@ import tkinter as tk from tkinter import ttk, messagebox import subprocess +import json +import os -face_proc = None -is_dark_mode = False 
# Tracks current theme state +face_proc = None +blink_proc = None +enhanced_proc = None def run_face_detection(): global face_proc @@ -13,73 +16,534 @@ def run_face_detection(): messagebox.showerror("Error", f"Failed to run face detection:\n{e}") def run_blink_detection(): + global blink_proc try: - subprocess.call(["python", "blinkDetect.py"]) + blink_proc = subprocess.Popen(["python", "blinkDetect.py"]) except Exception as e: messagebox.showerror("Error", f"Failed to run blink detection:\n{e}") -def toggle_theme(root, frame, toggle_btn): - global is_dark_mode +def run_enhanced_detection(): + global enhanced_proc + try: + enhanced_proc = subprocess.Popen(["python", "enhanced_driver_detection.py"]) + except Exception as e: + messagebox.showerror("Error", f"Failed to run enhanced detection:\n{e}") - if is_dark_mode: - # Switch to light mode - root.configure(bg="#f0f0f0") - frame.configure(style="Light.TFrame") - toggle_btn.config(text="Switch to Dark Mode") - ttk.Style().configure('TButton', background="#ffffff", foreground="#000000") - else: - # Switch to dark mode - root.configure(bg="#2e2e2e") - frame.configure(style="Dark.TFrame") - toggle_btn.config(text="Switch to Light Mode") - ttk.Style().configure('TButton', background="#444444", foreground="#ffffff") +def run_lane_detection(): + try: + subprocess.Popen(["python", "lanedetection.py"]) + except Exception as e: + messagebox.showerror("Error", f"Failed to run lane detection:\n{e}") - is_dark_mode = not is_dark_mode +def configure_emergency_contacts(): + """Open configuration window for emergency contacts""" + config_window = tk.Toplevel() + config_window.title("Emergency Contacts Configuration") + config_window.geometry("500x400") + config_window.grab_set() + + # Load existing config + config_file = "emergency_config.json" + try: + with open(config_file, 'r') as f: + config = json.load(f) + except: + config = { + "emergency_contacts": ["emergency@example.com"], + "email_settings": { + "sender_email": "your_email@gmail.com", + "sender_password": "your_app_password" + } + } + + # Create form + tk.Label(config_window, text="Emergency Contacts Configuration", + font=('Arial', 14, 'bold')).pack(pady=10) + + # Email settings frame + email_frame = ttk.LabelFrame(config_window, text="Email Settings", padding=10) + email_frame.pack(fill='x', padx=20, pady=10) + + tk.Label(email_frame, text="Sender Email:").grid(row=0, column=0, sticky='w') + sender_email_var = tk.StringVar(value=config["email_settings"]["sender_email"]) + tk.Entry(email_frame, textvariable=sender_email_var, width=35).grid(row=0, column=1, padx=5) + + tk.Label(email_frame, text="App Password:").grid(row=1, column=0, sticky='w') + sender_password_var = tk.StringVar(value=config["email_settings"]["sender_password"]) + tk.Entry(email_frame, textvariable=sender_password_var, width=35, show='*').grid(row=1, column=1, padx=5) + + # Help text for Gmail setup + help_text = tk.Text(email_frame, height=3, width=50, wrap=tk.WORD, font=('Arial', 8)) + help_text.insert(tk.END, "Gmail Setup: Enable 2FA โ†’ Go to Google Account Settings โ†’ Security โ†’ App passwords โ†’ Generate app-specific password") + help_text.config(state=tk.DISABLED, bg='#f0f0f0') + help_text.grid(row=2, column=0, columnspan=2, pady=5, sticky='ew') + + # Emergency contacts frame + contacts_frame = ttk.LabelFrame(config_window, text="Emergency Contacts", padding=10) + contacts_frame.pack(fill='both', expand=True, padx=20, pady=10) + + # Contacts listbox with scrollbar + listbox_frame = tk.Frame(contacts_frame) + 
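# --- Illustrative sketch, not part of the patch ---
# A slightly stricter e-mail format check than the "'@' in contact and '.' in contact"
# test used by add_contact() below; the regex and helper name are assumptions for
# illustration only.
import re

_EMAIL_RE = re.compile(r"^[^@\s]+@[^@\s]+\.[^@\s]+$")

def looks_like_email(address):
    """Rough syntactic check: no whitespace, exactly one '@', and a dot in the domain part."""
    return bool(_EMAIL_RE.match(address))

# looks_like_email("driver@example.com") -> True; looks_like_email("driver@com") -> False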
listbox_frame.pack(fill='both', expand=True, pady=(0, 10)) + + contacts_listbox = tk.Listbox(listbox_frame, height=8) + scrollbar = tk.Scrollbar(listbox_frame, orient=tk.VERTICAL) + contacts_listbox.config(yscrollcommand=scrollbar.set) + scrollbar.config(command=contacts_listbox.yview) + + contacts_listbox.pack(side=tk.LEFT, fill='both', expand=True) + scrollbar.pack(side=tk.RIGHT, fill=tk.Y) + + # Populate listbox + for contact in config["emergency_contacts"]: + contacts_listbox.insert(tk.END, contact) + + # Buttons frame + buttons_frame = tk.Frame(contacts_frame) + buttons_frame.pack(fill='x') + + # Add contact + def add_contact(): + contact = tk.simpledialog.askstring("Add Contact", "Enter email address:") + if contact and '@' in contact and '.' in contact: + contacts_listbox.insert(tk.END, contact) + elif contact: + messagebox.showwarning("Invalid Email", "Please enter a valid email address") + + # Remove contact + def remove_contact(): + selection = contacts_listbox.curselection() + if selection: + contacts_listbox.delete(selection) + else: + messagebox.showwarning("No Selection", "Please select a contact to remove") + + tk.Button(buttons_frame, text="Add Contact", command=add_contact, + bg='#4CAF50', fg='white', font=('Arial', 9, 'bold')).pack(side='left', padx=5) + tk.Button(buttons_frame, text="Remove Contact", command=remove_contact, + bg='#f44336', fg='white', font=('Arial', 9, 'bold')).pack(side='left', padx=5) + + # Test email button + def test_email(): + try: + test_config = { + "emergency_contacts": [sender_email_var.get()], + "email_settings": { + "sender_email": sender_email_var.get(), + "sender_password": sender_password_var.get() + } + } + + # Simple test - just validate format for now + if '@' in sender_email_var.get() and sender_password_var.get(): + messagebox.showinfo("Test", "Email configuration looks valid!\nActual test will occur during alert.") + else: + messagebox.showerror("Test Failed", "Please provide valid email and password") + + except Exception as e: + messagebox.showerror("Test Failed", f"Configuration error:\n{e}") + + tk.Button(buttons_frame, text="Test Config", command=test_email, + bg='#FF9800', fg='white', font=('Arial', 9, 'bold')).pack(side='right', padx=5) + + # Save configuration + def save_config(): + try: + contacts_list = list(contacts_listbox.get(0, tk.END)) + if not contacts_list: + messagebox.showwarning("Warning", "Please add at least one emergency contact") + return + + new_config = { + "emergency_contacts": contacts_list, + "email_settings": { + "sender_email": sender_email_var.get(), + "sender_password": sender_password_var.get() + } + } + + with open(config_file, 'w') as f: + json.dump(new_config, f, indent=2) + + messagebox.showinfo("Success", f"Configuration saved successfully!\nContacts: {len(contacts_list)}") + config_window.destroy() + + except Exception as e: + messagebox.showerror("Error", f"Failed to save configuration:\n{e}") + + # Save button + tk.Button(config_window, text="Save Configuration", command=save_config, + bg='#2196F3', fg='white', font=('Arial', 11, 'bold'), + padx=20, pady=8).pack(pady=15) + +def show_help(): + """Show detailed help information""" + help_window = tk.Toplevel() + help_window.title("Enhanced Driver Detection System - Help") + help_window.geometry("700x600") + help_window.grab_set() + + # Create scrollable text widget + text_frame = tk.Frame(help_window) + text_frame.pack(fill='both', expand=True, padx=10, pady=10) + + help_text = tk.Text(text_frame, wrap=tk.WORD, font=('Consolas', 10)) + scrollbar 
= tk.Scrollbar(text_frame, orient=tk.VERTICAL, command=help_text.yview) + help_text.config(yscrollcommand=scrollbar.set) + + help_content = """ + ๐Ÿš— ENHANCED DRIVER DROWSINESS & IMPAIRMENT DETECTION SYSTEM ๐Ÿš— + + โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• + + ๐Ÿ“‹ SYSTEM OVERVIEW: + This advanced system combines multiple detection algorithms to monitor driver + safety in real-time, detecting both drowsiness and signs of impairment. + + ๐Ÿ”ง DETECTION MODULES: + + 1. ๐Ÿ‘ค Face Detection (Basic) + โ€ข Simple face recognition using Haar cascades + โ€ข Foundation for other detection systems + โ€ข Good for testing camera functionality + + 2. ๐Ÿ‘๏ธ Blink Detection (Original) + โ€ข Eye Aspect Ratio (EAR) based drowsiness detection + โ€ข Blink counting and pattern analysis + โ€ข Audio alerts for drowsiness + โ€ข Session tracking with statistics + + 3. ๐Ÿšจ Enhanced Detection (RECOMMENDED) + โ€ข Complete drowsiness detection (EAR analysis) + โ€ข Advanced impairment detection: + โœ“ Head movement/sway analysis + โœ“ Delayed blinking patterns + โœ“ Eye redness detection + โœ“ Droopy eyelid detection + โ€ข Emergency alert system via email + โ€ข Real-time visual indicators + โ€ข Comprehensive session tracking + + 4. ๐Ÿ›ฃ๏ธ Lane Detection + โ€ข Road lane tracking using computer vision + โ€ข Hough transform line detection + โ€ข Video processing capabilities + + โš™๏ธ SETUP REQUIREMENTS: + + ๐Ÿ“ Required Files: + โ€ข models/shape_predictor_68_face_landmarks.dat + โ€ข models/haarcascade_frontalface_default.xml + โ€ข alarm.wav (audio alert file) + + ๐Ÿ“ง Email Configuration: + โ€ข Gmail account with 2-factor authentication enabled + โ€ข App-specific password (NOT regular password) + โ€ข Emergency contact email addresses + + ๐Ÿ”ง Installation Steps: + 1. Install Python dependencies: pip install -r requirements.txt + 2. Download dlib face landmarks model + 3. Configure emergency contacts via settings + 4. Test camera access + + ๐ŸŽฏ ENHANCED DETECTION FEATURES: + + Drowsiness Indicators: + โ€ข Eye Aspect Ratio (EAR) monitoring + โ€ข Consecutive frame analysis + โ€ข Blink frequency patterns + โ€ข Audio and visual alerts + + Impairment Indicators: + โ€ข Head Pose Analysis: Detects excessive head swaying/movement + โ€ข Delayed Blink Detection: Identifies slow or delayed blinking + โ€ข Eye Color Analysis: Detects redness indicating possible impairment + โ€ข Eyelid Position: Monitors for droopy or partially closed eyelids + + ๐Ÿšจ Alert System: + โ€ข Visual warnings on screen + โ€ข Audio alarm for drowsiness + โ€ข Email alerts to emergency contacts + โ€ข Severity-based escalation + + ๐Ÿ“Š Session Tracking: + โ€ข Real-time EAR values + โ€ข Alert frequency + โ€ข Session duration + โ€ข Detection statistics + + โŒจ๏ธ KEYBOARD CONTROLS: + โ€ข 'q' - Quit detection + โ€ข 'r' - Reset all alerts + โ€ข ESC - Exit (in some modules) + + ๐Ÿ” TROUBLESHOOTING: + + Camera Issues: + โ€ข Check camera permissions + โ€ข Close other applications using camera + โ€ข Try different camera indices (0, 1, 2...) 
+ + Detection Issues: + โ€ข Ensure good lighting conditions + โ€ข Position face clearly in camera view + โ€ข Check if required model files exist + + Email Issues: + โ€ข Verify 2FA is enabled on Gmail + โ€ข Use app password, not regular password + โ€ข Check internet connection + โ€ข Verify email addresses are valid + + Performance Issues: + โ€ข Close unnecessary applications + โ€ข Reduce camera resolution if needed + โ€ข Ensure adequate system resources + + ๐Ÿ“ˆ OPTIMAL USAGE: + โ€ข Use Enhanced Detection for comprehensive monitoring + โ€ข Configure email alerts before driving + โ€ข Test system in safe environment first + โ€ข Ensure stable camera mounting + โ€ข Maintain good lighting in vehicle + + โš ๏ธ IMPORTANT NOTES: + โ€ข This system is a safety aid, not a replacement for responsible driving + โ€ข Regular breaks and proper rest are essential + โ€ข System accuracy depends on lighting and camera quality + โ€ข Email alerts require internet connection + + ๐Ÿ”’ PRIVACY: + โ€ข All processing is done locally + โ€ข No data is sent to external servers + โ€ข Video is processed in real-time only + โ€ข Email alerts contain summary information only + + For technical support or questions, check the project documentation. + """ + + help_text.insert(tk.END, help_content) + help_text.config(state=tk.DISABLED) + + help_text.pack(side=tk.LEFT, fill='both', expand=True) + scrollbar.pack(side=tk.RIGHT, fill=tk.Y) + + tk.Button(help_window, text="Close", command=help_window.destroy, + bg='#607D8B', fg='white', font=('Arial', 10, 'bold')).pack(pady=10) + +def show_about(): + """Show about information""" + about_text = """ + Enhanced Driver Drowsiness & Impairment Detection System + Version 2.0 + + ๐ŸŽฏ Mission: Enhance road safety through advanced computer vision + + ๐Ÿ”ฌ Technology Stack: + โ€ข OpenCV - Computer vision processing + โ€ข dlib - Facial landmark detection + โ€ข NumPy/SciPy - Mathematical computations + โ€ข Python - Core development + + ๐Ÿš€ Features: + โ€ข Real-time drowsiness detection + โ€ข Advanced impairment analysis + โ€ข Emergency alert system + โ€ข Session tracking & analytics + + ๐Ÿ‘ฅ Developed for: Driver Safety & Road Traffic Management + + โš–๏ธ License: Educational and Research Use + + โš ๏ธ Disclaimer: This system is designed as a safety aid. + It does not replace the need for responsible driving practices, + adequate rest, and adherence to traffic laws. + """ + + messagebox.showinfo("About - Enhanced Detection System", about_text) + +def check_system_requirements(): + """Check if system has required components""" + missing_items = [] + + # Check required files + required_files = [ + ("models/shape_predictor_68_face_landmarks.dat", "Facial landmarks model"), + ("models/haarcascade_frontalface_default.xml", "Face detection cascade"), + ("alarm.wav", "Audio alert file") + ] + + for file_path, description in required_files: + if not os.path.exists(file_path): + missing_items.append(f"โ€ข {description}: {file_path}") + + # Check Python modules + required_modules = ['cv2', 'dlib', 'numpy', 'scipy'] + for module in required_modules: + try: + __import__(module) + except ImportError: + missing_items.append(f"โ€ข Python module: {module}") + + if missing_items: + warning_msg = "โš ๏ธ MISSING SYSTEM COMPONENTS:\n\n" + "\n".join(missing_items) + warning_msg += "\n\n๐Ÿ“ฅ Download required files and install dependencies for full functionality." + warning_msg += "\n\n๐Ÿ’ก See Help section for detailed setup instructions." 
+ messagebox.showwarning("System Requirements Check", warning_msg) + return False + else: + messagebox.showinfo("System Check", "โœ… All required components found!\nSystem ready for operation.") + return True def on_quit(root): - if face_proc and face_proc.poll() is None: - face_proc.terminate() + global face_proc, blink_proc, enhanced_proc + + # Terminate all running processes + processes = [ + (face_proc, "Face Detection"), + (blink_proc, "Blink Detection"), + (enhanced_proc, "Enhanced Detection") + ] + + terminated_count = 0 + for proc, name in processes: + if proc and proc.poll() is None: + try: + proc.terminate() + terminated_count += 1 + print(f"Terminated {name} process") + except Exception as e: + print(f"Error terminating {name}: {e}") + + if terminated_count > 0: + print(f"Terminated {terminated_count} running detection process(es)") + + print("Enhanced Detection System shutdown complete") root.destroy() def main(): root = tk.Tk() - root.title("Driver Drowsiness Detection System") - root.geometry("500x500") - root.configure(bg="#f0f0f0") # Default light background - + root.title("Enhanced Driver Drowsiness & Impairment Detection System v2.0") + root.geometry("800x700") + root.configure(bg='#1a237e') + + # Configure modern styling style = ttk.Style() - style.theme_use("clam") - - # Frame styles - style.configure("Light.TFrame", background="#f0f0f0") - style.configure("Dark.TFrame", background="#2e2e2e") - - # Button styles - style.configure('TButton', - font=('Segoe UI', 14, 'bold'), - padding=10, - borderwidth=1, - relief="raised") - - frame = ttk.Frame(root, padding=20, style="Light.TFrame") - frame.pack(expand=True) - - btn_face = ttk.Button(frame, text="Face Detection", command=run_face_detection) - btn_face.grid(row=0, column=0, padx=15, pady=15) - - btn_blink = ttk.Button(frame, text="Blink Detection", command=run_blink_detection) - btn_blink.grid(row=0, column=1, padx=15, pady=15) - - # Toggle button - btn_toggle = ttk.Button(root, text="Switch to Dark Mode") - btn_toggle.config(command=lambda: toggle_theme(root, frame, btn_toggle)) - btn_toggle.pack(pady=10) - - # Quit button - btn_quit = ttk.Button(root, text="Quit", command=lambda: on_quit(root)) - btn_quit.pack(side=tk.BOTTOM, pady=20) - + style.theme_use('clam') + + # Custom color scheme + style.configure('Title.TLabel', font=('Arial', 18, 'bold'), + background='#1a237e', foreground='white') + style.configure('Subtitle.TLabel', font=('Arial', 11), + background='#1a237e', foreground='#e3f2fd') + style.configure('Custom.TButton', font=('Arial', 12, 'bold'), padding=12) + + # Header section + header_frame = tk.Frame(root, bg='#1a237e') + header_frame.pack(fill='x', pady=20) + + title_label = ttk.Label(header_frame, text="๐Ÿš— Enhanced Driver Safety System", + style='Title.TLabel') + title_label.pack() + + subtitle_label = ttk.Label(header_frame, text="Advanced Drowsiness & Impairment Detection with Emergency Alerts", + style='Subtitle.TLabel') + subtitle_label.pack(pady=(5, 0)) + + # Main content frame + main_frame = tk.Frame(root, bg='#f5f5f5') + main_frame.pack(expand=True, fill='both', padx=20, pady=10) + + # Detection modules frame + detection_frame = ttk.LabelFrame(main_frame, text="๐Ÿ” Detection Modules", padding=20) + detection_frame.pack(fill='x', pady=10) + + # Create detection buttons in a 2x2 grid + buttons_info = [ + ("๐Ÿ‘ค Face Detection", run_face_detection, "Basic face recognition & testing", '#2196F3', 'white'), + ("๐Ÿ‘๏ธ Blink Detection", run_blink_detection, "Original drowsiness detection system", 
'#FF5722', 'white'), + ("๐Ÿšจ Enhanced Detection", run_enhanced_detection, "Complete impairment detection + alerts", '#4CAF50', 'white'), + ("๐Ÿ›ฃ๏ธ Lane Detection", run_lane_detection, "Road lane tracking system", '#9C27B0', 'white') + ] + + buttons_grid = tk.Frame(detection_frame) + buttons_grid.pack(expand=True, fill='both') + + for i, (text, command, desc, bg_color, fg_color) in enumerate(buttons_info): + row = i // 2 + col = i % 2 + + btn_frame = tk.Frame(buttons_grid, bg='white', relief='raised', bd=2, padx=10, pady=10) + btn_frame.grid(row=row, column=col, padx=15, pady=15, sticky='ew') + + btn = tk.Button(btn_frame, text=text, command=command, + font=('Arial', 12, 'bold'), bg=bg_color, fg=fg_color, + relief='flat', padx=25, pady=18, cursor='hand2') + btn.pack(fill='x') + + desc_label = tk.Label(btn_frame, text=desc, font=('Arial', 9), + bg='white', fg='#555', wraplength=200) + desc_label.pack(pady=(8, 5)) + + # Configure grid weights for responsive layout + buttons_grid.columnconfigure(0, weight=1) + buttons_grid.columnconfigure(1, weight=1) + + # Configuration and help frame + config_frame = ttk.LabelFrame(main_frame, text="โš™๏ธ Configuration & Support", padding=15) + config_frame.pack(fill='x', pady=10) + + config_buttons = [ + ("๐Ÿ“ง Configure Emergency Contacts", configure_emergency_contacts, '#E91E63'), + ("โ“ Help & Setup Guide", show_help, '#607D8B'), + ("๐Ÿ” Check System Requirements", check_system_requirements, '#795548'), + ("โ„น๏ธ About System", show_about, '#455A64') + ] + + config_grid = tk.Frame(config_frame) + config_grid.pack(fill='x') + + for i, (text, command, color) in enumerate(config_buttons): + btn = tk.Button(config_grid, text=text, command=command, + font=('Arial', 10, 'bold'), bg=color, fg='white', + relief='flat', padx=20, pady=10, cursor='hand2') + btn.grid(row=i//2, column=i%2, padx=8, pady=5, sticky='ew') + + config_grid.columnconfigure(0, weight=1) + config_grid.columnconfigure(1, weight=1) + + # Status and tips frame + status_frame = tk.Frame(main_frame, bg='#e8f5e8', relief='groove', bd=2) + status_frame.pack(fill='x', pady=15) + + tip_label = tk.Label(status_frame, + text="๐Ÿ’ก Recommendation: Use Enhanced Detection for comprehensive safety monitoring", + font=('Arial', 10, 'italic'), bg='#e8f5e8', fg='#2e7d32', + pady=12) + tip_label.pack() + + # Footer with quit button + footer_frame = tk.Frame(root, bg='#1a237e') + footer_frame.pack(fill='x', side='bottom') + + quit_btn = tk.Button(footer_frame, text="๐Ÿšช Exit System", + command=lambda: on_quit(root), + font=('Arial', 12, 'bold'), bg='#d32f2f', fg='white', + relief='flat', padx=40, pady=12, cursor='hand2') + quit_btn.pack(pady=20) + + # Bind window close event + root.protocol("WM_DELETE_WINDOW", lambda: on_quit(root)) + + # Auto-check requirements after window loads + root.after(2000, check_system_requirements) + + print("๐Ÿš€ Enhanced Driver Detection System v2.0 Started") + print("๐Ÿ”ง Features: Drowsiness Detection + Impairment Analysis + Emergency Alerts") + root.mainloop() if __name__ == "__main__": + import tkinter.simpledialog main() diff --git a/requirements.txt b/requirements.txt index a812ee8..1640c58 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,8 @@ +# ==================================================================================================================================== +# ENHANCED DRIVER DROWSINESS & IMPAIRMENT DETECTION SYSTEM - REQUIREMENTS # ==================================================================== -# DRIVER DROWSINESS DETECTION 
SYSTEM - REQUIREMENTS -# ==================================================================== -# This file contains all the Python dependencies required to run the -# driver drowsiness detection system. Each package is carefully versioned -# to ensure compatibility and stable performance. +# This file contains all Python dependencies for the enhanced system +# including drunk driver detection and emergency alert capabilities # # INSTALLATION INSTRUCTIONS: # 1. Create a virtual environment (recommended): @@ -40,7 +39,16 @@ scipy>=1.7.0,<2.0.0 # AUDIO PROCESSING AND ALERTS # ==================================================================== playsound>=1.2.2,<2.0.0 -playsound; platform_system == "Windows" # For compatibility on Windows + +# ==================================================================== +# EMAIL AND EMERGENCY ALERT SYSTEM +# ==================================================================== +# Built-in libraries used: smtplib, email.mime.text, email.mime.multipart +# No additional packages needed for basic email functionality + +# For enhanced email features (optional): +# yagmail>=0.15.0,<1.0.0 # Simplified Gmail sending +# sendgrid>=6.9.0,<7.0.0 # SendGrid API support # ==================================================================== # DATA VISUALIZATION AND PLOTTING @@ -57,33 +65,87 @@ mediapipe>=0.10.0,<1.0.0 # ==================================================================== # PWA (PROGRESSIVE WEB APP) DEPENDENCIES # ==================================================================== -# For generating PWA icons and assets Pillow>=8.0.0,<11.0.0 # ==================================================================== # WEB SERVING AND STATIC FILES # ==================================================================== -# Additional web server capabilities for PWA -aiofiles>=0.7.0,<1.0.0 # For async file serving +aiofiles>=0.7.0,<1.0.0 + +# ==================================================================== +# CONFIGURATION AND DATA HANDLING +# ==================================================================== +# Built-in libraries used: json, os, threading, queue, collections +# No additional packages needed + +# ==================================================================== +# ENHANCED FEATURES DEPENDENCIES +# ==================================================================== +# For head pose estimation and advanced computer vision +# All functionality uses OpenCV and dlib - no additional packages needed + +# For mathematical calculations (euler angles, etc.) 
+# Uses built-in math library and numpy # ==================================================================== -# BUILT-IN PYTHON LIBRARIES (No installation required) +# GUI DEPENDENCIES # ==================================================================== -# - tkinter -# - threading -# - queue -# - subprocess -# - time -# - sys +# tkinter - Built-in with Python +# No additional packages needed for GUI + +# ==================================================================== +# BUILT-IN PYTHON LIBRARIES USED (No installation required) +# ==================================================================== +# Core libraries: +# - tkinter (GUI) +# - threading (Multi-threading) +# - queue (Thread communication) +# - subprocess (Process management) +# - time (Time operations) +# - sys (System operations) +# - os (Operating system interface) +# - json (JSON handling) +# - collections (deque for data storage) +# - math (Mathematical operations) +# - smtplib (Email sending) +# - email.mime.text, email.mime.multipart (Email formatting) + +# ==================================================================== +# OPTIONAL ENHANCEMENTS +# ==================================================================== +# For production deployment: +# gunicorn>=20.1.0,<21.0.0 # WSGI server +# nginx # Reverse proxy (system package) + +# For advanced logging: +# loguru>=0.6.0,<1.0.0 + +# For configuration management: +# python-dotenv>=0.19.0,<1.0.0 + +# For database support (future feature): +# sqlite3 # Built-in with Python +# sqlalchemy>=1.4.0,<2.0.0 # Optional ORM # ==================================================================== # COMPATIBILITY NOTES # ==================================================================== -# Python Version: Tested with Python 3.7 - 3.10 +# Python Version: Tested with Python 3.7 - 3.11 # Operating Systems: Windows 10/11, macOS 10.15+, Ubuntu 18.04+ # # TROUBLESHOOTING: -# - If dlib installation fails, install Visual Studio Build Tools (Windows) -# - For macOS users, install Xcode command line tools: xcode-select --install -# - Linux users may need: sudo apt-get install cmake libopenblas-dev liblapack-dev -# ==================================================================== +# - If dlib installation fails: +# * Windows: Install Visual Studio Build Tools +# * macOS: Install Xcode command line tools: xcode-select --install +# * Linux: sudo apt-get install cmake libopenblas-dev liblapack-dev +# +# - For email functionality: +# * Use Gmail App Passwords (not regular password) +# * Enable 2-factor authentication on Gmail +# * Generate app-specific password in Google Account settings +# +# - For webcam access issues: +# * Ensure camera permissions are granted +# * Check if other applications are using the camera +# * Try different camera indices (0, 1, 2, etc.) +# ==== diff --git a/setup_guide.md b/setup_guide.md new file mode 100644 index 0000000..b13936f --- /dev/null +++ b/setup_guide.md @@ -0,0 +1,314 @@ +# Enhanced Driver Drowsiness & Impairment Detection System - Setup Guide + +## ๐Ÿš€ Quick Start + +This enhanced system now includes drunk driver detection capabilities alongside the original drowsiness detection. 
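+
+For orientation, the drowsiness side of the system is built around the Eye Aspect Ratio (EAR) referenced throughout this guide. The snippet below is a minimal, illustrative sketch of that calculation, not the shipped implementation in `blinkDetect.py`; it assumes the six eye landmarks produced per eye by the 68-point facial landmark model:
+
+```python
+from scipy.spatial import distance as dist
+
+def eye_aspect_ratio(eye):
+    # eye: the six (x, y) eye landmarks from the 68-point model, in order
+    A = dist.euclidean(eye[1], eye[5])  # vertical distance (upper/lower lid)
+    B = dist.euclidean(eye[2], eye[4])  # second vertical distance
+    C = dist.euclidean(eye[0], eye[3])  # horizontal eye width
+    return (A + B) / (2.0 * C)          # tends toward 0 as the eye closes
+
+# The detector treats an eye held below the configured EAR threshold for
+# longer than the configured drowsy-time window as a drowsiness event.
+```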
+ +### New Features Added: +- **Head Movement Analysis** - Detects excessive swaying indicating impairment +- **Delayed Blink Detection** - Identifies slow or delayed blinking patterns +- **Eye Redness Detection** - Analyzes eye color for signs of alcohol consumption +- **Droopy Eyelid Detection** - Detects partially closed eyes indicating impairment +- **Emergency Alert System** - Sends email alerts to emergency contacts +- **Enhanced UI** - Improved interface with configuration options + +## ๐Ÿ“‹ Prerequisites + +- Python 3.7 - 3.11 +- Webcam/Camera +- Internet connection (for emergency alerts) +- Gmail account (for email alerts) + +## ๐Ÿ›  Installation Steps + +### 1. Clone and Setup Environment + +```bash +# Clone the repository +git clone https://github.com/Parthavi19/driver-drowsiness-detection-system.git +cd driver-drowsiness-detection-system + +# Create virtual environment +python -m venv drowsiness_env + +# Activate virtual environment +# On Windows: +drowsiness_env\Scripts\activate +# On macOS/Linux: +source drowsiness_env/bin/activate +``` + +### 2. Install Dependencies + +```bash +# Install all required packages +pip install -r requirements.txt + +# If dlib installation fails, try: +pip install cmake +pip install dlib +``` + +### 3. Download Required Model Files + +Create a `models` directory and download these files: + +#### Facial Landmark Predictor: +```bash +# Create models directory +mkdir models + +# Download shape predictor (68-point facial landmarks) +# Download from: https://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 +# Extract and place in models/ folder +``` + +#### Haar Cascade (if not present): +```bash +# Download from OpenCV repository or use the existing one +# URL: https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml +``` + +### 4. Add Audio Alert File + +```bash +# Add alarm.wav file to root directory +# You can use any .wav audio file and rename it to alarm.wav +``` + +### 5. Configure Email Settings + +#### Gmail Setup (Recommended): +1. Enable 2-factor authentication on your Gmail account +2. Generate an App Password: + - Go to Google Account settings + - Security โ†’ 2-Step Verification โ†’ App passwords + - Generate password for "Mail" +3. Use this app password (not your regular Gmail password) + +## ๐Ÿšฆ Running the System + +### Option 1: Enhanced Detection (Recommended) +```bash +python main.py +``` +Then click "๐Ÿšจ Enhanced Detection" button. + +### Option 2: Direct Enhanced Detection +```bash +python enhanced_driver_detection.py +``` + +### Option 3: Original Modules +```bash +# Original blink detection (now enhanced) +python blinkDetect.py + +# Face detection only +python face-try.py + +# Lane detection +python lanedetection.py +``` + +## โš™๏ธ Configuration + +### Emergency Contacts Setup: +1. Run `python main.py` +2. Click "โš™๏ธ Configure Emergency Contacts" +3. Add your email settings and emergency contact emails +4. 
Save configuration + +### Manual Configuration: +Create `emergency_config.json`: +```json +{ + "emergency_contacts": [ + "emergency1@example.com", + "emergency2@example.com" + ], + "email_settings": { + "sender_email": "your_email@gmail.com", + "sender_password": "your_app_password" + } +} +``` + +## ๐ŸŽฏ Usage Instructions + +### Controls During Detection: +- **'q'** - Quit the application +- **'r'** - Reset all alerts and counters +- **ESC** - Exit (in some modules) + +### Detection Indicators: + +#### Drowsiness Detection: +- **EAR (Eye Aspect Ratio)** - Shows eye openness level +- **Blink Count** - Number of blinks detected +- **DROWSINESS ALERT** - Triggered when eyes closed too long + +#### Impairment Detection: +- **Head Pose** - Shows pitch, yaw, roll angles +- **Impairment Indicators** - Shows count of active indicators: + - Head Sway + - Delayed Blinking + - Red Eyes + - Droopy Eyelids + +### Alert Levels: +- **LOW** - Single indicator detected +- **MEDIUM** - Multiple indicators OR drowsiness + some indicators +- **HIGH** - Multiple indicators + drowsiness (emergency alert sent) + +## ๐Ÿ”ง Troubleshooting + +### Common Issues: + +#### 1. Camera Not Working: +```python +# Try different camera indices +cap = cv2.VideoCapture(1) # Try 1, 2, etc. instead of 0 +``` + +#### 2. Dlib Installation Failed: +```bash +# On Windows: +# Install Visual Studio Build Tools from Microsoft +pip install cmake +pip install dlib + +# On macOS: +xcode-select --install +pip install cmake dlib + +# On Linux: +sudo apt-get install cmake libopenblas-dev liblapack-dev +pip install dlib +``` + +#### 3. Face Detection Not Working: +- Ensure good lighting +- Check if `models/shape_predictor_68_face_landmarks.dat` exists +- Try adjusting `FACE_DOWNSAMPLE_RATIO` in the code + +#### 4. Email Alerts Not Sending: +- Verify Gmail app password (not regular password) +- Check internet connection +- Ensure 2-factor authentication is enabled +- Try with a test email first + +#### 5. Performance Issues: +```python +# Reduce processing load by adjusting these parameters: +FACE_DOWNSAMPLE_RATIO = 2.0 # Increase for faster processing +RESIZE_HEIGHT = 360 # Decrease for better performance +``` + +## ๐Ÿ“ File Structure + +``` +driver-drowsiness-detection-system/ +โ”œโ”€โ”€ main.py # Enhanced main UI +โ”œโ”€โ”€ enhanced_driver_detection.py # New comprehensive detection +โ”œโ”€โ”€ blinkDetect.py # Enhanced with drunk detection +โ”œโ”€โ”€ face-try.py # Original face detection +โ”œโ”€โ”€ lanedetection.py # Lane detection +โ”œโ”€โ”€ requirements.txt # Updated dependencies +โ”œโ”€โ”€ emergency_config.json # Email configuration +โ”œโ”€โ”€ models/ +โ”‚ โ”œโ”€โ”€ shape_predictor_68_face_landmarks.dat +โ”‚ โ””โ”€โ”€ haarcascade_frontalface_default.xml +โ”œโ”€โ”€ alarm.wav # Audio alert file +โ””โ”€โ”€ icons/ # PWA icons (for web version) +``` + +## ๐Ÿงช Testing the System + +### 1. Test Face Detection: +```bash +python face-try.py +# Should show rectangles around detected faces +``` + +### 2. Test Enhanced Detection: +```bash +python enhanced_driver_detection.py +# Should show multiple detection indicators +``` + +### 3. 
Test Email Alerts: +- Configure emergency contacts +- Simulate impairment (look away, close eyes partially) +- Check if email alert is received + +## ๐Ÿ” Security Notes + +- **Never commit email passwords to version control** +- Use Gmail App Passwords, not regular passwords +- Consider using environment variables for sensitive data +- Regularly update dependencies for security patches + +## ๐Ÿ“Š Performance Optimization + +### For Better Performance: +1. **Adjust Resolution**: + ```python + RESIZE_HEIGHT = 320 # Lower resolution + ``` + +2. **Reduce Face Detection Area**: + ```python + FACE_DOWNSAMPLE_RATIO = 2.0 # Process smaller image + ``` + +3. **Optimize Detection Frequency**: + ```python + # Process every N frames instead of every frame + if frame_count % 3 == 0: # Process every 3rd frame + # Run detection + ``` + +## ๐Ÿ†˜ Emergency Features + +### Automatic Emergency Response: +- System automatically sends emails when severe impairment detected +- Multiple detection methods ensure accuracy +- Configurable sensitivity levels +- Manual reset capability + +### Emergency Contact Best Practices: +- Add multiple emergency contacts +- Include family members and local emergency services +- Test email delivery periodically +- Keep contact list updated + +## ๐Ÿ”„ Updates and Maintenance + +### Regular Maintenance: +1. Update dependencies monthly: + ```bash + pip install --upgrade -r requirements.txt + ``` + +2. Check model file integrity +3. Test emergency alert system +4. Review and update emergency contacts + +### Contributing: +- Report issues on GitHub +- Submit pull requests for improvements +- Share detection accuracy feedback +- Suggest new safety features + +## ๐Ÿ“ž Support + +For issues and support: +1. Check this troubleshooting guide +2. Review GitHub issues +3. Test with different lighting conditions +4. Verify all dependencies are installed correctly + +--- + +**โš ๏ธ Important Safety Note**: This system is designed to assist in detecting driver impairment but should not be the sole safety measure. Always prioritize responsible driving practices and seek help if experiencing impairment.
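+
+## 📧 Appendix: Standalone Email Test (Optional)
+
+To test email delivery outside the detection loop, a small standalone script along the following lines can be used. This is an illustrative sketch rather than part of the shipped code: it reads the same `emergency_config.json` shown above and assumes Gmail's SMTP server (`smtp.gmail.com` on port 465 over SSL) together with an app password.
+
+```python
+import json
+import smtplib
+from email.mime.text import MIMEText
+
+# Reuse the configuration file created via the settings dialog
+with open("emergency_config.json") as f:
+    config = json.load(f)
+
+settings = config["email_settings"]
+contacts = config["emergency_contacts"]
+
+msg = MIMEText("This is a test alert from the Enhanced Driver Detection System.")
+msg["Subject"] = "Test Alert - Enhanced Driver Detection System"
+msg["From"] = settings["sender_email"]
+msg["To"] = ", ".join(contacts)
+
+# Gmail SMTP over SSL; requires an app password, not the account password
+with smtplib.SMTP_SSL("smtp.gmail.com", 465) as server:
+    server.login(settings["sender_email"], settings["sender_password"])
+    server.sendmail(settings["sender_email"], contacts, msg.as_string())
+
+print("Test alert sent to:", ", ".join(contacts))
+```
+
+If this script delivers mail successfully, the same credentials should work for the automatic alerts triggered by Enhanced Detection.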