Commit 3c2dc8c

update example and bug fix
1 parent 176a2ad commit 3c2dc8c

File tree

examples/PoseDetection/index.html
examples/PoseDetection/sketch.js
src/PoseDetection/index.js

3 files changed: +26 -57 lines changed

examples/PoseDetection/index.html

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@
 <html>
   <head>
     <meta charset="UTF-8" />
-    <title>PoseNet example using p5.js</title>
+    <title>Pose detection example using p5.js</title>
     <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.6.0/p5.js"></script>
     <script src="../../dist/ml5.js"></script>
   </head>

examples/PoseDetection/sketch.js

Lines changed: 20 additions & 41 deletions
@@ -14,42 +14,41 @@ let poses = [];
 
 function setup() {
   createCanvas(640, 480);
+
+  // Create the video and hide it
   video = createCapture(VIDEO);
   video.size(width, height);
-
-  // Create a new poseNet method with a single detection
-  poseNet = ml5.poseDetection(video, modelReady);
-  // This sets up an event that fills the global variable "poses"
-  // with an array every time new poses are detected
-  poseNet.on("pose", function (results) {
-    poses = results;
-  });
-  // Hide the video element, and just show the canvas
   video.hide();
+
+  // Load the model and attach an event
+  poseDetector = ml5.poseDetection(video, modelReady);
+  poseDetector.on("pose", gotPoses);
 }
 
+// Event for pose detection
+function gotPoses(results) {
+  // Always save the latest output from the model in global variable "poses"
+  poses = results;
+}
+
+// Event for when model loaded
 function modelReady() {
-  console.log("Model Loaded!");
+  console.log("Model ready!");
 }
 
 function draw() {
+  console.log(poses);
+  // Draw the video
   image(video, 0, 0, width, height);
-  //console.log(poses);
-  // We can call both functions to draw all keypoints and the skeletons
-  drawKeypoints();
-  //drawSkeleton();
-}
 
-// A function to draw ellipses over the detected keypoints
-function drawKeypoints() {
-  // Loop through all the poses detected
+  // Draw all the tracked landmark points
+  // for each individual pose detected
   for (let i = 0; i < poses.length; i++) {
-    // For each pose detected, loop through all the keypoints
     let pose = poses[i];
+    // for each keypoint in the pose
     for (let j = 0; j < pose.keypoints.length; j++) {
-      // A keypoint is an object describing a body part (like rightArm or leftShoulder)
       let keypoint = pose.keypoints[j];
-      // Only draw an ellipse is the pose probability is bigger than 0.2
+      // Only draw an ellipse if the confidence score of the keypoint is bigger than 0.2
       if (keypoint.score > 0.2) {
         fill(255, 0, 0);
         noStroke();
@@ -58,23 +57,3 @@ function drawKeypoints() {
     }
   }
 }
-
-// A function to draw the skeletons
-function drawSkeleton() {
-  // Loop through all the skeletons detected
-  for (let i = 0; i < poses.length; i++) {
-    let skeleton = poses[i].skeleton;
-    // For every skeleton, loop through all body connections
-    for (let j = 0; j < skeleton.length; j++) {
-      let partA = skeleton[j][0];
-      let partB = skeleton[j][1];
-      stroke(255, 0, 0);
-      line(
-        partA.position.x,
-        partA.position.y,
-        partB.position.x,
-        partB.position.y
-      );
-    }
-  }
-}
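For context on the updated drawing loop: it assumes each entry in the global `poses` array carries a flat `keypoints` list with per-keypoint confidence scores (hence `keypoint.score > 0.2`), rather than PoseNet's nested `keypoint.position`. Below is a minimal sketch of that assumed shape and an illustrative p5.js helper; the field names are assumptions about MoveNet-style output, not lines from this commit (the commit's own ellipse call sits between the two hunks above and is not shown in the diff):

```js
// Assumed shape of one entry in the global "poses" array (MoveNet-style output).
const examplePose = {
  keypoints: [
    { x: 320, y: 180, score: 0.92, name: "nose" },
    { x: 300, y: 240, score: 0.87, name: "left_shoulder" },
    // ...one entry per tracked landmark
  ],
};

// Illustrative helper only: draw a single keypoint with p5.js given that shape.
function drawKeypoint(keypoint) {
  if (keypoint.score > 0.2) {
    fill(255, 0, 0);
    noStroke();
    ellipse(keypoint.x, keypoint.y, 10, 10);
  }
}
```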

src/PoseDetection/index.js

Lines changed: 5 additions & 15 deletions
@@ -63,6 +63,7 @@ class PoseDetection extends EventEmitter {
       trackerType: this.config.trackerType ?? "boundingBox",
       trackerConfig: this.config.trackerConfig,
     };
+    // use multi-pose lightning model by default
     switch (this.config.modelType) {
       case "SINGLEPOSE_LIGHTNING":
         modelConfig.modelType =
@@ -71,7 +72,8 @@ class PoseDetection extends EventEmitter {
       case "SINGLEPOSE_THUNDER":
         modelConfig.modelType =
           bodyPoseDetection.movenet.modelType.SINGLEPOSE_THUNDER;
-      case "MULTIPOSE_LIGHTNING":
+        break;
+      default:
         modelConfig.modelType =
           bodyPoseDetection.movenet.modelType.MULTIPOSE_LIGHTNING;
     }
@@ -87,18 +89,7 @@ class PoseDetection extends EventEmitter {
     return this;
   }
 
-  //Add named keypoints to a MoveNet pose object
-  // mapParts(pose) {
-  //   const newPose = JSON.parse(JSON.stringify(pose));
-  //   newPose.keypoints.forEach((keypoint) => {
-  //     newPose[keypoint.part] = {
-  //       x: keypoint.position.x,
-  //       y: keypoint.position.y,
-  //       confidence: keypoint.score,
-  //     };
-  //   });
-  //   return newPose;
-  // }
+  //TODO: Add named keypoints to a MoveNet pose object
 
   /**
    * Given an image or video, returns an array of objects containing pose estimations
@@ -114,8 +105,7 @@ class PoseDetection extends EventEmitter {
     await mediaReady(image, false);
     const result = await this.model.estimatePoses(image);
 
-    //Add named keypoints to each pose object
-    //const result = poses.map((pose) => this.mapParts(pose));
+    // TODO: Add named keypoints to each pose object
 
     this.emit("pose", result);
 
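The `switch` change above is the bug fix named in the commit message: JavaScript `case` blocks fall through when `break` is missing, so a request for the single-pose Thunder model was previously overwritten by the multi-pose Lightning assignment. A minimal illustration using placeholder strings, not the actual `bodyPoseDetection` constants:

```js
// Fall-through demo with placeholder values; not the real model constants.
function pickModelBeforeFix(modelType) {
  let chosen;
  switch (modelType) {
    case "SINGLEPOSE_THUNDER":
      chosen = "thunder";
    // no break here: execution continues into the next case body
    case "MULTIPOSE_LIGHTNING":
      chosen = "lightning"; // overwrites "thunder"
  }
  return chosen;
}

console.log(pickModelBeforeFix("SINGLEPOSE_THUNDER")); // "lightning" (the bug)
// With the added `break` and the new `default` branch, Thunder is kept and any
// unrecognized modelType now falls back to the multi-pose Lightning model.
```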
