/*
 * 👋 Hello! This is an ml5.js example made and shared with ❤️.
 * Learn more about the ml5.js project: https://ml5js.org/
 * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md
 *
 * This example demonstrates building and texturing a 3D mesh using depth estimation on the webcam video.
 *
 * Use your mouse to drag and zoom the 3D mesh in space.
 */
| 10 | + |
// Handles for the ml5 model, the webcam capture, the latest depth result,
// and the 3D mesh rebuilt each frame in draw()
let depthEstimator;
let webcam;
let depthMap;
let mesh;

// Webcam capture dimensions.
// Smaller values run faster but produce a coarser, less accurate depth map.
let videoWidth = 320;
let videoHeight = 240;

// Set true by the results callback whenever a fresh depth map arrives
let newDataAvailable = false;

// Options passed to ml5.depthEstimation()
let options = {
  // Default dilationFactor is 4; 2 preserves more detail in this smaller video
  dilationFactor: 2,
};
| 29 | + |
async function setup() {
  // Load the depth estimation model; await because model files load asynchronously
  depthEstimator = await ml5.depthEstimation(options);

  // WEBGL canvas at twice the video size so the mesh can be rendered scaled up in 3D
  createCanvas(videoWidth * 2, videoHeight * 2, WEBGL);

  // Start the webcam, size it, and hide the raw HTML <video> element —
  // we draw the textured mesh instead of the plain video
  webcam = createCapture(VIDEO);
  webcam.size(videoWidth, videoHeight);
  webcam.hide();

  // Start with an empty geometry; draw() rebuilds it from each new depth map
  mesh = new p5.Geometry();

  // Run depth estimation continuously on the webcam feed;
  // gotResults is invoked with every new result
  depthEstimator.estimateStart(webcam, gotResults);

  // Render mesh faces without outlines
  noStroke();
}
| 49 | + |
function draw() {
  // Enable mouse drag/zoom camera control (p5 WEBGL)
  orbitControl();

  // Only rebuild and render the mesh when a fresh depth map has arrived
  if (!newDataAvailable) return;

  background(0);

  // Release the previous geometry's GPU resources and start a fresh mesh
  freeGeometry(mesh);
  mesh = new p5.Geometry();

  // One vertex per webcam pixel; z comes from the estimated depth
  for (let y = 0; y < webcam.height; y++) {
    for (let x = 0; x < webcam.width; x++) {
      // Depth value from the model (float, 0 - 1): 0 is closest, 1 is farthest
      const depthAtPixel = depthMap.getDepthAt(x, y);

      // Map depth to a z offset: near pixels pop toward the viewer (+200),
      // far pixels recede (-200)
      const z = map(depthAtPixel, 0, 1, 200, -200);

      // A 3D pixel ("voxel"): push the vertex and its UV texture coordinates
      mesh.vertices.push(createVector(x, y, z));
      mesh.uvs.push(x / webcam.width, y / webcam.height);

      // Build faces for every pixel except the last row and column
      if (x < webcam.width - 1 && y < webcam.height - 1) {
        // Index of this vertex in mesh.vertices (row-major order).
        // (The original computed (x + y * width) * 4 then divided by 4 —
        // the pixel-array stride round-trip is unnecessary.)
        const voxelIndex = x + y * webcam.width;

        // The 4 corners of this quad of the mesh
        const a = voxelIndex; // current pixel
        const b = voxelIndex + 1; // x + 1 pixel
        const c = voxelIndex + webcam.width; // y + 1 pixel
        const d = voxelIndex + webcam.width + 1; // x + 1 and y + 1 pixel

        // Depths at the quad's corners; a's depth was already fetched above,
        // so reuse it instead of calling getDepthAt(x, y) a second time
        const aDepth = depthAtPixel;
        const bDepth = depthMap.getDepthAt(x + 1, y);
        const cDepth = depthMap.getDepthAt(x, y + 1);
        const dDepth = depthMap.getDepthAt(x + 1, y + 1);

        // Each quad is two adjacent triangles: abc and bdc.
        // Skip triangles touching the background (depth exactly 0).
        if (aDepth !== 0 && bDepth !== 0 && cDepth !== 0) {
          mesh.faces.push([a, b, c]);
        }
        if (bDepth !== 0 && dDepth !== 0 && cDepth !== 0) {
          mesh.faces.push([b, d, c]);
        }
      }
    }
  }

  // Compute face normals so the mesh is lit/oriented correctly
  mesh.computeNormals();

  push();

  // Double the size to fill the 2x-sized canvas
  scale(2);

  // Center the mesh (WEBGL's origin is the middle of the canvas)
  translate(-videoWidth / 2, -videoHeight / 2, 0);

  // Texture the mesh with the exact video frame that produced this depth map
  texture(depthMap.sourceFrame);
  model(mesh);

  pop();

  // Mark the data as consumed until the callback delivers the next result
  newDataAvailable = false;
}
| 135 | + |
// Callback invoked by estimateStart with each depth estimation result
function gotResults(result) {
  // Keep only the newest depth map and flag draw() to rebuild the mesh
  depthMap = result;
  newDataAvailable = true;
}