-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathFaceRecog.html
More file actions
153 lines (136 loc) · 6.66 KB
/
FaceRecog.html
File metadata and controls
153 lines (136 loc) · 6.66 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Real-Time Face Detector</title>
  <!-- Tailwind CSS for styling -->
  <script src="https://cdn.tailwindcss.com"></script>
  <!-- face-api.js for machine learning -->
  <script src="https://cdn.jsdelivr.net/npm/face-api.js@0.22.2/dist/face-api.min.js"></script>
  <style>
    /* Custom styles for a clean, modern look */
    body {
      font-family: 'Inter', sans-serif;
    }
    /* Style to overlay the canvas on top of the video feed */
    #videoContainer {
      position: relative;
      display: flex;
      justify-content: center;
      align-items: center;
    }
    canvas {
      position: absolute;
      top: 0;
      left: 0;
    }
    /* Style for the loading spinner */
    .loader {
      border: 8px solid #f3f3f3;
      border-top: 8px solid #3498db;
      border-radius: 50%;
      width: 60px;
      height: 60px;
      animation: spin 2s linear infinite;
    }
    @keyframes spin {
      0% { transform: rotate(0deg); }
      100% { transform: rotate(360deg); }
    }
  </style>
</head>
<body class="bg-gray-900 text-white flex flex-col items-center justify-center min-h-screen p-4">
  <div class="w-full max-w-4xl mx-auto text-center">
    <h1 class="text-4xl md:text-5xl font-bold mb-2 bg-clip-text text-transparent bg-gradient-to-r from-blue-400 to-purple-500">Real-Time Face Detector</h1>
    <p class="text-gray-400 mb-6">Allow camera access to see the magic. Each detected face will be highlighted.</p>
    <!-- Container for the Video and Canvas -->
    <div id="videoContainer" class="relative w-full aspect-video bg-gray-800 rounded-lg shadow-2xl overflow-hidden mx-auto">
      <!-- Loading indicator -->
      <div id="loader" class="absolute inset-0 flex flex-col items-center justify-center bg-gray-900 bg-opacity-75 z-20">
        <div class="loader mb-4"></div>
        <p id="loadingMessage" class="text-lg">Loading AI Models...</p>
      </div>
      <!-- Video element to display camera feed.
           playsinline is required for inline autoplay on iOS Safari;
           without it the camera feed opens fullscreen or not at all. -->
      <video id="video" width="1280" height="720" autoplay muted playsinline class="w-full h-full object-cover"></video>
      <!-- Canvas element to draw the face detection boxes -->
      <canvas id="canvas" class="absolute top-0 left-0"></canvas>
    </div>
    <!-- type="button" makes the intent explicit (default button type is submit) -->
    <button id="startButton" type="button" class="mt-6 px-6 py-3 bg-blue-600 hover:bg-blue-700 text-white font-semibold rounded-lg shadow-md transition-transform transform hover:scale-105 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-opacity-50">
      Start Camera
    </button>
  </div>
<script>
// Cache references to the DOM elements the script reads and updates.
const video = document.getElementById('video');
const canvas = document.getElementById('canvas');
const loader = document.getElementById('loader');
const loadingMessage = document.getElementById('loadingMessage');
const startButton = document.getElementById('startButton');
// Predefined colors for the bounding boxes
// (cycled per detected face so each face gets a distinct color).
const boxColors = [
'#FF5733', '#33FF57', '#3357FF', '#FF33A1', '#A133FF',
'#33FFA1', '#FFC300', '#C70039', '#900C3F', '#581845'
];
// Downloads the face-detection model weights from the CDN and reports
// the outcome in the on-screen status message.
async function loadModels() {
  // Pretrained weights hosted alongside the face-api.js release.
  const WEIGHTS_URL = 'https://cdn.jsdelivr.net/gh/justadudewhohacks/face-api.js@0.22.2/weights';
  let statusText;
  try {
    // The tiny detector is small and fast enough for real-time use.
    await faceapi.nets.tinyFaceDetector.loadFromUri(WEIGHTS_URL);
    statusText = 'Models Loaded. Ready to Start!';
  } catch (error) {
    console.error("Error loading models:", error);
    statusText = 'Failed to load AI models. Please refresh.';
  }
  loadingMessage.innerText = statusText;
}
// Requests camera access and wires the resulting stream into the
// <video> element. On failure (denied permission, no device) it shows
// an error message and hides the spinner.
async function startVideo() {
  try {
    video.srcObject = await navigator.mediaDevices.getUserMedia({ video: {} });
  } catch (cameraError) {
    console.error('Error accessing camera:', cameraError);
    loadingMessage.innerText = 'Camera access denied. Please allow camera access and refresh.';
    loader.style.display = 'none'; // Hide spinner
  }
}
// The start button is one-shot: kick off the camera, then remove the
// button so it cannot be clicked again.
startButton.addEventListener('click', function handleStart() {
  startVideo();
  startButton.style.display = 'none'; // Hide button after starting
});
// Once the camera stream starts playing, size the overlay canvas to the
// rendered video and begin the real-time detection loop.
video.addEventListener('play', () => {
  // Hide the loader once video is playing.
  loader.style.display = 'none';
  // Match canvas dimensions to the video's rendered size so the boxes
  // line up with the picture on screen.
  const displaySize = { width: video.clientWidth, height: video.clientHeight };
  faceapi.matchDimensions(canvas, displaySize);
  // Set up an interval to detect faces repeatedly.
  setInterval(async () => {
    // Detect all faces in the current video frame.
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions());
    if (!detections) return;
    // Resize the detected boxes to match the video display size.
    const resizedDetections = faceapi.resizeResults(detections, displaySize);
    // Clear the previous frame's boxes before drawing the new ones.
    const context = canvas.getContext('2d');
    context.clearRect(0, 0, canvas.width, canvas.height);
    // Draw a unique colored box for each detected face.
    resizedDetections.forEach((detection, i) => {
      const box = detection.box;
      const drawBox = new faceapi.draw.DrawBox(box, {
        // Cycle through the predefined colors.
        boxColor: boxColors[i % boxColors.length],
        // BUG FIX: the value was missing ("lineWidth:"), a syntax error
        // that prevented the whole script from parsing and running.
        lineWidth: 2
      });
      drawBox.draw(canvas);
    });
  }, 100); // Run detection every 100 milliseconds
});
// Load models as soon as the script runs, so they are ready (or the
// failure message is shown) by the time the user clicks Start Camera.
loadModels();
</script>
</body>
</html>