|
| 1 | +import cv2 |
| 2 | +import numpy as np |
| 3 | +from sklearn.neighbors import KNeighborsClassifier |
| 4 | + |
# Gait classifier: k-nearest-neighbours over the extracted gait feature
# vectors (k=3).
# NOTE(review): the model is instantiated here but never `fit()` nor used
# anywhere below — presumably training/prediction happens elsewhere or is
# still TODO; confirm before relying on it.
model = KNeighborsClassifier(n_neighbors=3)
# Function to perform background subtraction and silhouette extraction
def extract_silhouette(frame, bg_subtractor=None):
    """Return a binary (0/255) silhouette mask for *frame*.

    Parameters
    ----------
    frame : BGR image (numpy array) as read from ``cv2.VideoCapture``.
    bg_subtractor : optional cv2 background subtractor. When omitted, a
        single module-lifetime MOG2 subtractor is created on first call
        and reused for every subsequent call.

    Returns
    -------
    Single-channel uint8 mask where foreground pixels are 255.
    """
    # BUG FIX: the original created a brand-new MOG2 subtractor on every
    # call, so the background model never accumulated frame history and
    # subtraction was effectively recomputed from scratch each frame.
    # Persist one subtractor across calls instead.
    if bg_subtractor is None:
        if not hasattr(extract_silhouette, "_bg_subtractor"):
            extract_silhouette._bg_subtractor = cv2.createBackgroundSubtractorMOG2()
        bg_subtractor = extract_silhouette._bg_subtractor

    # Convert the frame to grayscale before subtraction
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Apply background subtraction (updates the persistent model)
    fgmask = bg_subtractor.apply(gray)

    # Threshold high (250) to keep only confident foreground — MOG2 marks
    # shadows at 127, so this also strips shadow pixels from the mask.
    _, silhouette = cv2.threshold(fgmask, 250, 255, cv2.THRESH_BINARY)

    return silhouette
| 20 | + |
# Function to extract gait features from the silhouette
def extract_gait_features(silhouette):
    """Compute a minimal gait feature vector from a binary silhouette.

    The feature is the area of the largest external contour found in the
    mask; ``[0]`` is returned when the mask contains no contours at all.
    """
    contours, _ = cv2.findContours(
        silhouette, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )
    # Guard clause: an empty mask yields no contours.
    if not contours:
        return [0]  # Return zero if no valid silhouette is found
    biggest = max(contours, key=cv2.contourArea)
    # Example feature: area of the dominant blob.
    return [cv2.contourArea(biggest)]
| 29 | + |
# Start capturing video (from webcam or pre-recorded video)
VIDEO_SOURCE = 'walking_video.mp4'
cap = cv2.VideoCapture(VIDEO_SOURCE)

# Fail loudly on a bad path/codec — otherwise cap.read() just returns
# (False, None) and the loop below exits silently on its first iteration.
if not cap.isOpened():
    raise IOError(f"Cannot open video source: {VIDEO_SOURCE}")

# try/finally guarantees the capture device and GUI windows are released
# even if a frame raises mid-loop.
try:
    while True:
        ret, frame = cap.read()
        if not ret:
            # End of stream (or read failure)
            break

        # Extract silhouette from the current frame
        silhouette = extract_silhouette(frame)

        # Extract gait features
        gait_features = extract_gait_features(silhouette)
        # NOTE(review): gait_features is computed but never consumed; it
        # should eventually be fed to `model` once the model is trained.

        # Display the silhouette
        cv2.imshow("Silhouette", silhouette)

        # Check for user input to exit ('q' quits)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    cap.release()
    cv2.destroyAllWindows()
0 commit comments