-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathsketch.js
More file actions
183 lines (162 loc) · 5.36 KB
/
sketch.js
File metadata and controls
183 lines (162 loc) · 5.36 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
// HOW TO USE
// predictWebcam(video) will start predicting landmarks
// pass a video MediaElement using createCapture
// make sure to call predictWebcam as a callback to createCapture
// this ensures the video is ready
// parts index and documentation:
// NOTE(review): this sketch reads mediaPipe.faceLandmarks / mediaPipe.parts,
// so the relevant reference is the FACE landmarker (the original link pointed
// at the hand landmarker by mistake):
// https://developers.google.com/mediapipe/solutions/vision/face_landmarker
let capture; // p5 video capture element, created in captureWebcam()
let captureEvent; // MediaStream handed to the createCapture ready-callback
let predictionsElement; // DOM node for the blend-shape readout (see drawBlendShapes)
// p5 entry point: create a full-window canvas, start the webcam capture,
// and grab the DOM element used for the blend-shape readout.
function setup() {
  createCanvas(windowWidth, windowHeight);
  captureWebcam(); // also kicks off landmark prediction once video is ready
  predictionsElement = document.getElementById("predictions");
}
// p5 render loop: draw the mirrored webcam frame, then overlay the detected
// face landmarks and outlines of named face parts.
function draw() {
  background(255);
  // drawBlendShapes(predictionsElement, mediaPipe.faceBlendshapes);

  // Flip the webcam image horizontally so it behaves like a mirror.
  push();
  scale(-1, 1); // mirror webcam
  image(capture, -capture.width, 0); // draw at the flipped origin
  pop(); // pop() restores the transform — no manual un-mirroring needed

  // Put a yellow circle on each landmark of every detected face.
  if (mediaPipe.faceLandmarks.length > 0) {
    mediaPipe.faceLandmarks.forEach((face) => {
      face.forEach((landmark) => {
        noStroke();
        fill("yellow");
        circle(...getFlipPos(landmark), 5);
      });
    });
  }

  // Helper functions to draw parts of the face.
  // Order matters: each fill/stroke call sets p5 state for the draws below it.
  noStroke();
  fill(0);
  circlePart(mediaPipe.parts.leftIris);
  circlePart(mediaPipe.parts.rightIris);
  strokeWeight(1);
  stroke("white");
  outLinePart(mediaPipe.parts.tesselation);
  strokeWeight(3);
  stroke("red");
  outLinePart(mediaPipe.parts.leftEye);
  stroke("green");
  outLinePart(mediaPipe.parts.rightEye);
  stroke("tomato");
  outLinePart(mediaPipe.parts.faceOval);
  stroke("hotpink");
  outLinePart(mediaPipe.parts.lips);
}
// Convert a normalized (0..1) landmark into canvas pixel coordinates,
// mirroring x because the webcam image is drawn flipped.
// Optional xAdd/yAdd offsets are applied after scaling.
function getFlipPos(part, xAdd = 0, yAdd = 0) {
  const flippedX = capture.width - part.x * capture.width + xAdd;
  const scaledY = part.y * capture.height + yAdd;
  return [flippedX, scaledY];
}
// Draw a face 'part' as individual line segments; each 'bit' carries the
// landmark indices of one segment's two endpoints.
function outLinePart(part) {
  // need both a part definition and at least one detected face
  if (!(part && part.length > 0 && mediaPipe.faceLandmarks.length > 0)) {
    return;
  }
  const landmarks = mediaPipe.faceLandmarks[0];
  for (const bit of part) {
    const from = getFlipPos(landmarks[bit.start]);
    const to = getFlipPos(landmarks[bit.end]);
    line(...from, ...to);
  }
}
// Draw a face 'part' as one filled, closed polygon, using the end index of
// each 'bit' as a polygon vertex.
function fillPart(part) {
  // need both a part definition and at least one detected face
  if (!(part && part.length > 0 && mediaPipe.faceLandmarks.length > 0)) {
    return;
  }
  const landmarks = mediaPipe.faceLandmarks[0];
  beginShape();
  for (const bit of part) {
    vertex(...getFlipPos(landmarks[bit.end]));
  }
  endShape(CLOSE);
}
// Useful for the iris: estimate a part's centre (mean of its landmarks) and
// diameter (horizontal extent of the point cloud) and draw it as a circle.
function circlePart(part) {
  // need both a part definition and at least one detected face
  if (!(part && part.length > 0 && mediaPipe.faceLandmarks.length > 0)) {
    return;
  }
  const landmarks = mediaPipe.faceLandmarks[0];
  let minX = Infinity;
  let maxX = -Infinity;
  let sumX = 0;
  let sumY = 0;
  for (const bit of part) {
    const { x, y } = landmarks[bit.end];
    minX = Math.min(minX, x);
    maxX = Math.max(maxX, x);
    sumX += x;
    sumY += y;
  }
  // centre of the cloud in normalized coordinates
  const centre = { x: sumX / part.length, y: sumY / part.length };
  // diameter is scaled from normalized width to pixels
  circle(...getFlipPos(centre), (maxX - minX) * capture.width);
}
// this function helps to capture the webcam in a way that ensures the video
// is loaded before we start predicting landmarks. createCapture has a callback
// which is only called when the video is correctly loaded. At that point we
// set the dimensions and start predicting landmarks
function captureWebcam() {
  capture = createCapture(
    {
      audio: false,
      video: {
        facingMode: "user", // prefer the front-facing camera on mobile
      },
    },
    // stream-ready callback: `e` is the underlying MediaStream
    function (e) {
      captureEvent = e;
      console.log(captureEvent.getTracks()[0].getSettings());
      // do things when video ready
      // until then, the video element will have no dimensions, or default 640x480
      capture.srcObject = e;
      setCameraDimensions();
      mediaPipe.predictWebcam(capture);
    }
  );
  capture.elt.setAttribute("playsinline", ""); // required for inline playback on iOS Safari
  capture.hide(); // hide the DOM <video>; the frame is drawn to the canvas in draw()
}
// Resize the video element to fill the canvas while preserving the camera's
// aspect ratio — the camera may have different dimensions than the default
// video element.
function setCameraDimensions() {
  if (capture.width > capture.height) {
    // landscape camera: lock to the canvas width
    const scaledHeight = (capture.height / capture.width) * width;
    capture.size(width, scaledHeight);
  } else {
    // portrait (or square) camera: lock to the canvas height
    const scaledWidth = (capture.width / capture.height) * height;
    capture.size(scaledWidth, height);
  }
}
// resize the canvas when the window is resized
// also reset the camera dimensions
// (p5 calls this automatically whenever the browser window changes size)
function windowResized() {
  resizeCanvas(windowWidth, windowHeight);
  setCameraDimensions(); // keep the video scaled to the new canvas size
}
// Render face blend-shape scores into `el` as a list of labelled bars.
// el: container DOM element; blendShapes: mediaPipe faceBlendshapes array.
// NOTE: the interpolated names/scores come from the MediaPipe model, not
// from user input, so innerHTML assignment is acceptable here.
function drawBlendShapes(el, blendShapes) {
  // nothing to render until the first prediction arrives
  if (!blendShapes.length) {
    return;
  }
  // build the markup with map().join() — the original used .map purely for
  // side effects, which discards the mapped values
  const htmlMaker = blendShapes[0].categories
    .map(
      (shape) => `
    <li class="blend-shapes-item">
      <span class="blend-shapes-label">${
        shape.displayName || shape.categoryName
      }</span>
      <span class="blend-shapes-value" style="width: calc(${
        +shape.score * 100
      }% - 120px)">${(+shape.score).toFixed(4)}</span>
    </li>
  `
    )
    .join("");
  el.innerHTML = htmlMaker;
}