Skip to content

Commit 6294b24

Browse files
authored
DOCS-1876: Add examples for ML and vision services (#547)
1 parent bbc7514 commit 6294b24

File tree

2 files changed

+74
-3
lines changed

2 files changed

+74
-3
lines changed

src/viam/services/mlmodel/mlmodel.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,17 @@ class MLModel(ServiceBase):
2626
async def infer(self, input_tensors: Dict[str, NDArray], *, timeout: Optional[float]) -> Dict[str, NDArray]:
2727
"""Take an already ordered input tensor as an array, make an inference on the model, and return an output tensor map.
2828
29+
::
30+
31+
import numpy as np
32+
33+
my_mlmodel = MLModelClient.from_robot(robot=robot, name="my_mlmodel_service")
34+
35+
nd_array = np.array([1, 2, 3], dtype=np.float64)
36+
input_tensors = {"0": nd_array}
37+
38+
output_tensors = await my_mlmodel.infer(input_tensors)
39+
2940
Args:
3041
input_tensors (Dict[str, NDArray]): A dictionary of input flat tensors as specified in the metadata
3142
@@ -38,6 +49,12 @@ async def infer(self, input_tensors: Dict[str, NDArray], *, timeout: Optional[fl
3849
async def metadata(self, *, timeout: Optional[float]) -> Metadata:
3950
"""Get the metadata (such as name, type, expected tensor/array shape, inputs, and outputs) associated with the ML model.
4051
52+
::
53+
54+
my_mlmodel = MLModelClient.from_robot(robot=robot, name="my_mlmodel_service")
55+
56+
metadata = await my_mlmodel.metadata()
57+
4158
Returns:
4259
Metadata: The metadata
4360
"""

src/viam/services/vision/vision.py

Lines changed: 57 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,16 @@ async def get_detections_from_camera(
3434
) -> List[Detection]:
3535
"""Get a list of detections in the next image given a camera and a detector
3636
37+
::
38+
39+
camera_name = "cam1"
40+
41+
# Grab the detector you configured on your machine
42+
my_detector = VisionClient.from_robot(robot, "my_detector")
43+
44+
# Get detections from the next image from the camera
45+
detections = await my_detector.get_detections_from_camera(camera_name)
46+
3747
Args:
3848
camera_name (str): The name of the camera to use for detection
3949
@@ -54,6 +64,20 @@ async def get_detections(
5464
) -> List[Detection]:
5565
"""Get a list of detections in the given image using the specified detector
5666
67+
::
68+
69+
# Grab camera from the machine
70+
cam1 = Camera.from_robot(robot, "cam1")
71+
72+
# Get the detector you configured on your machine
73+
my_detector = VisionClient.from_robot(robot, "my_detector")
74+
75+
# Get an image from the camera
76+
img = await cam1.get_image()
77+
78+
# Get detections from that image
79+
detections = await my_detector.get_detections(img)
80+
5781
Args:
5882
image (Image): The image to get detections from
5983
@@ -75,6 +99,17 @@ async def get_classifications_from_camera(
7599
) -> List[Classification]:
76100
"""Get a list of classifications in the next image given a camera and a classifier
77101
102+
::
103+
104+
camera_name = "cam1"
105+
106+
# Grab the classifier you configured on your machine
107+
my_classifier = VisionClient.from_robot(robot, "my_classifier")
108+
109+
# Get the 2 classifications with the highest confidence scores from the next image from the camera
110+
classifications = await my_classifier.get_classifications_from_camera(
111+
camera_name, 2)
112+
78113
Args:
79114
camera_name (str): The name of the camera to use for classification
80115
count (int): The number of classifications desired
@@ -95,6 +130,20 @@ async def get_classifications(
95130
) -> List[Classification]:
96131
"""Get a list of classifications in the given image using the specified classifier
97132
133+
::
134+
135+
# Grab camera from the machine
136+
cam1 = Camera.from_robot(robot, "cam1")
137+
138+
# Get the classifier you configured on your machine
139+
my_classifier = VisionClient.from_robot(robot, "my_classifier")
140+
141+
# Get an image from the camera
142+
img = await cam1.get_image()
143+
144+
# Get the 2 classifications with the highest confidence scores
145+
classifications = await my_classifier.get_classifications(img, 2)
146+
98147
Args:
99148
image (Image): The image to get classifications from
100149
count (int): The number of classifications desired
@@ -117,16 +166,21 @@ async def get_object_point_clouds(
117166
picture obtained from the specified 3D camera (using the specified segmenter).
118167
119168
To deserialize the returned information into a numpy array, use the Open3D library.
169+
120170
::
121171
122172
import numpy as np
123173
import open3d as o3d
124174
125-
object_point_clouds = await vision.get_object_point_clouds(camera_name, segmenter_name)
126-
175+
# Grab the 3D camera from the machine
176+
cam1 = Camera.from_robot(robot, "cam1")
177+
# Grab the object segmenter you configured on your machine
178+
my_segmenter = VisionClient.from_robot(robot, "my_segmenter")
179+
# Get the objects from the camera output
180+
objects = await my_segmenter.get_object_point_clouds("cam1")
127181
# write the first object point cloud into a temporary file
128182
with open("/tmp/pointcloud_data.pcd", "wb") as f:
129-
f.write(object_point_clouds[0].point_cloud)
183+
f.write(objects[0].point_cloud)
130184
pcd = o3d.io.read_point_cloud("/tmp/pointcloud_data.pcd")
131185
points = np.asarray(pcd.points)
132186

0 commit comments

Comments (0)