Skip to content

Commit 3710b44

Browse files
author
awstools
committed
feat(client-lookoutvision): This release introduces support for image segmentation models and updates CPU accelerator options for models hosted on edge devices.
1 parent ef1043e commit 3710b44

File tree

5 files changed

+235
-21
lines changed

5 files changed

+235
-21
lines changed

clients/client-lookoutvision/src/LookoutVision.ts

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -475,12 +475,17 @@ export class LookoutVision extends LookoutVisionClient {
475475
/**
476476
* <p>Detects anomalies in an image that you supply. </p>
477477
* <p>The response from <code>DetectAnomalies</code> includes a boolean prediction
478-
* that the image contains one or more anomalies and a confidence value for the prediction.</p>
478+
* that the image contains one or more anomalies and a confidence value for the prediction.
479+
* If the model is an image segmentation model, the response also includes segmentation
480+
* information for each type of anomaly found in the image.</p>
479481
* <note>
480482
* <p>Before calling <code>DetectAnomalies</code>, you must first start your model with the <a>StartModel</a> operation.
481483
* You are charged for the amount of time, in minutes, that a model runs and for the number of anomaly detection units that your
482484
* model uses. If you are not using a model, use the <a>StopModel</a> operation to stop your model. </p>
483485
* </note>
486+
*
487+
* <p>For more information, see <i>Detecting anomalies in an image</i> in the Amazon Lookout for Vision developer guide.</p>
488+
*
484489
* <p>This operation requires permissions to perform the
485490
* <code>lookoutvision:DetectAnomalies</code> operation.</p>
486491
*/

clients/client-lookoutvision/src/commands/DetectAnomaliesCommand.ts

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,12 +39,17 @@ export interface DetectAnomaliesCommandOutput extends DetectAnomaliesResponse, _
3939
/**
4040
* <p>Detects anomalies in an image that you supply. </p>
4141
* <p>The response from <code>DetectAnomalies</code> includes a boolean prediction
42-
* that the image contains one or more anomalies and a confidence value for the prediction.</p>
42+
* that the image contains one or more anomalies and a confidence value for the prediction.
43+
* If the model is an image segmentation model, the response also includes segmentation
44+
* information for each type of anomaly found in the image.</p>
4345
* <note>
4446
* <p>Before calling <code>DetectAnomalies</code>, you must first start your model with the <a>StartModel</a> operation.
4547
* You are charged for the amount of time, in minutes, that a model runs and for the number of anomaly detection units that your
4648
* model uses. If you are not using a model, use the <a>StopModel</a> operation to stop your model. </p>
4749
* </note>
50+
*
51+
* <p>For more information, see <i>Detecting anomalies in an image</i> in the Amazon Lookout for Vision developer guide.</p>
52+
*
4853
* <p>This operation requires permissions to perform the
4954
* <code>lookoutvision:DetectAnomalies</code> operation.</p>
5055
* @example

clients/client-lookoutvision/src/models/models_0.ts

Lines changed: 98 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,42 @@ export class AccessDeniedException extends __BaseException {
2525
}
2626
}
2727

28+
/**
29+
* <p>Information about the pixels in an anomaly mask. For more information, see <a>Anomaly</a>.
30+
* <code>PixelAnomaly</code> is only returned by image segmentation models.</p>
31+
*/
32+
export interface PixelAnomaly {
33+
/**
34+
* <p>The percentage area of the image that the anomaly type covers.</p>
35+
*/
36+
TotalPercentageArea?: number;
37+
38+
/**
39+
* <p>A hex color value for the mask that covers an anomaly type. Each anomaly type has
40+
* a different mask color. The color maps to the color of the anomaly type used in the
41+
* training dataset. </p>
42+
*/
43+
Color?: string;
44+
}
45+
46+
/**
47+
* <p>Information about an anomaly type found on an image by an image segmentation model.
48+
* For more information, see <a>DetectAnomalies</a>.</p>
49+
*/
50+
export interface Anomaly {
51+
/**
52+
* <p>The name of an anomaly type found in an image.
53+
* <code>Name</code> maps to an anomaly type in the training dataset, apart from the anomaly type <code>background</code>.
54+
* The service automatically inserts the <code>background</code> anomaly type into the response from <code>DetectAnomalies</code>. </p>
55+
*/
56+
Name?: string;
57+
58+
/**
59+
* <p>Information about the pixel mask that covers an anomaly type.</p>
60+
*/
61+
PixelAnomaly?: PixelAnomaly;
62+
}
63+
2864
export enum ResourceType {
2965
DATASET = "DATASET",
3066
MODEL = "MODEL",
@@ -919,13 +955,24 @@ export interface TargetPlatform {
919955
Arch: TargetPlatformArch | string | undefined;
920956

921957
/**
922-
* <p>The target accelerator for the model. NVIDIA (Nvidia graphics processing unit)
923-
* is the only accelerator that is currently supported. You must also specify the <code>gpu-code</code>, <code>trt-ver</code>,
924-
* and <code>cuda-ver</code> compiler options.
925-
*
926-
* </p>
958+
* <p>The target accelerator for the model. Currently, Amazon Lookout for Vision only supports NVIDIA (Nvidia graphics processing unit)
959+
* and CPU accelerators. If you specify NVIDIA as an accelerator, you must also specify the <code>gpu-code</code>, <code>trt-ver</code>,
960+
* and <code>cuda-ver</code> compiler options. If you don't specify an accelerator, Lookout for Vision uses the CPU for compilation and we highly recommend that you use the
961+
* <a>GreengrassConfiguration$CompilerOptions</a> field. For example, you can use the following compiler options for CPU: </p>
962+
* <ul>
963+
* <li>
964+
* <p>
965+
* <code>mcpu</code>: CPU micro-architecture. For example, <code>{'mcpu': 'skylake-avx512'}</code>
966+
* </p>
967+
* </li>
968+
* <li>
969+
* <p>
970+
* <code>mattr</code>: CPU flags. For example, <code>{'mattr': ['+neon', '+vfpv4']}</code>
971+
* </p>
972+
* </li>
973+
* </ul>
927974
*/
928-
Accelerator: TargetPlatformAccelerator | string | undefined;
975+
Accelerator?: TargetPlatformAccelerator | string;
929976
}
930977

931978
/**
@@ -940,8 +987,8 @@ export interface TargetPlatform {
940987
export interface GreengrassConfiguration {
941988
/**
942989
* <p>Additional compiler options for the Greengrass component. Currently,
943-
* only NVIDIA Graphics Processing Units (GPU) are supported. If you specify <code>TargetPlatform</code>, you must specify
944-
* <code>CompilerOptions</code>. If you specify <code>TargetDevice</code>, don't specify <code>CompilerOptions</code>.</p>
990+
* only NVIDIA Graphics Processing Units (GPU) and CPU accelerators are supported.
991+
* If you specify <code>TargetDevice</code>, don't specify <code>CompilerOptions</code>.</p>
945992
*
946993
*
947994
* <p>For more information, see
@@ -1232,7 +1279,11 @@ export interface ImageSource {
12321279
}
12331280

12341281
/**
1235-
* <p>The prediction results from a call to <a>DetectAnomalies</a>.</p>
1282+
* <p>The prediction results from a call to <a>DetectAnomalies</a>.
1283+
* <code>DetectAnomalyResult</code> includes classification information for the prediction (<code>IsAnomalous</code> and <code>Confidence</code>).
1284+
 * If the model you use is an image segmentation model, <code>DetectAnomalyResult</code> also includes segmentation information (<code>Anomalies</code>
1285+
* and <code>AnomalyMask</code>). Classification information is calculated separately from segmentation information
1286+
* and you shouldn't assume a relationship between them.</p>
12361287
*/
12371288
export interface DetectAnomalyResult {
12381289
/**
@@ -1242,14 +1293,36 @@ export interface DetectAnomalyResult {
12421293
Source?: ImageSource;
12431294

12441295
/**
1245-
* <p>True if the image contains an anomaly, otherwise false.</p>
1296+
* <p>True if Amazon Lookout for Vision classifies the image as containing an anomaly, otherwise false.</p>
12461297
*/
12471298
IsAnomalous?: boolean;
12481299

12491300
/**
1250-
* <p>The confidence that Amazon Lookout for Vision has in the accuracy of the prediction.</p>
1301+
* <p>The confidence that Lookout for Vision has in the accuracy of the classification in <code>IsAnomalous</code>.</p>
12511302
*/
12521303
Confidence?: number;
1304+
1305+
/**
1306+
* <p>If the model is an image segmentation model, <code>Anomalies</code> contains a list of
1307+
* anomaly types found in the image. There is one entry for each type of anomaly found (even
1308+
* if multiple instances of an anomaly type exist on the image). The first element in the list
1309+
* is always an anomaly type representing the image background ('background') and shouldn't be
1310+
 * considered an anomaly. Amazon Lookout for Vision automatically adds the background anomaly type to the
1311+
* response, and you don't need to declare a background anomaly type in your dataset.</p>
1312+
* <p>If the list has one entry ('background'), no anomalies were found on the image.</p>
1313+
* <p></p>
1314+
* <p>An image classification model doesn't return an <code>Anomalies</code> list. </p>
1315+
*/
1316+
Anomalies?: Anomaly[];
1317+
1318+
/**
1319+
 * <p>If the model is an image segmentation model, <code>AnomalyMask</code> contains pixel masks that cover all anomaly types found on the image.
1320+
*
1321+
* Each anomaly type has a different mask color. To map a color to an anomaly type, see the <code>color</code> field
1322+
* of the <a>PixelAnomaly</a> object.</p>
1323+
 * <p>An image classification model doesn't return an <code>AnomalyMask</code>. </p>
1324+
*/
1325+
AnomalyMask?: Uint8Array;
12531326
}
12541327

12551328
export interface DetectAnomaliesResponse {
@@ -1738,6 +1811,20 @@ export interface UpdateDatasetEntriesResponse {
17381811
Status?: DatasetStatus | string;
17391812
}
17401813

1814+
/**
1815+
* @internal
1816+
*/
1817+
export const PixelAnomalyFilterSensitiveLog = (obj: PixelAnomaly): any => ({
1818+
...obj,
1819+
});
1820+
1821+
/**
1822+
* @internal
1823+
*/
1824+
export const AnomalyFilterSensitiveLog = (obj: Anomaly): any => ({
1825+
...obj,
1826+
});
1827+
17411828
/**
17421829
* @internal
17431830
*/

clients/client-lookoutvision/src/protocols/Aws_restJson1.ts

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@ import {
6060
import { LookoutVisionServiceException as __BaseException } from "../models/LookoutVisionServiceException";
6161
import {
6262
AccessDeniedException,
63+
Anomaly,
6364
ConflictException,
6465
DatasetDescription,
6566
DatasetGroundTruthManifest,
@@ -81,6 +82,7 @@ import {
8182
ModelPerformance,
8283
OutputConfig,
8384
OutputS3Object,
85+
PixelAnomaly,
8486
ProjectDescription,
8587
ProjectMetadata,
8688
ResourceNotFoundException,
@@ -2497,6 +2499,26 @@ const serializeAws_restJson1TargetPlatform = (input: TargetPlatform, context: __
24972499
};
24982500
};
24992501

2502+
const deserializeAws_restJson1Anomaly = (output: any, context: __SerdeContext): Anomaly => {
2503+
return {
2504+
Name: __expectString(output.Name),
2505+
PixelAnomaly:
2506+
output.PixelAnomaly != null ? deserializeAws_restJson1PixelAnomaly(output.PixelAnomaly, context) : undefined,
2507+
} as any;
2508+
};
2509+
2510+
const deserializeAws_restJson1AnomalyList = (output: any, context: __SerdeContext): Anomaly[] => {
2511+
const retVal = (output || [])
2512+
.filter((e: any) => e != null)
2513+
.map((entry: any) => {
2514+
if (entry === null) {
2515+
return null as any;
2516+
}
2517+
return deserializeAws_restJson1Anomaly(entry, context);
2518+
});
2519+
return retVal;
2520+
};
2521+
25002522
const deserializeAws_restJson1DatasetDescription = (output: any, context: __SerdeContext): DatasetDescription => {
25012523
return {
25022524
CreationTimestamp:
@@ -2563,6 +2585,8 @@ const deserializeAws_restJson1DatasetMetadataList = (output: any, context: __Ser
25632585

25642586
const deserializeAws_restJson1DetectAnomalyResult = (output: any, context: __SerdeContext): DetectAnomalyResult => {
25652587
return {
2588+
Anomalies: output.Anomalies != null ? deserializeAws_restJson1AnomalyList(output.Anomalies, context) : undefined,
2589+
AnomalyMask: output.AnomalyMask != null ? context.base64Decoder(output.AnomalyMask) : undefined,
25662590
Confidence: __limitedParseFloat32(output.Confidence),
25672591
IsAnomalous: __expectBoolean(output.IsAnomalous),
25682592
Source: output.Source != null ? deserializeAws_restJson1ImageSource(output.Source, context) : undefined,
@@ -2783,6 +2807,13 @@ const deserializeAws_restJson1OutputS3Object = (output: any, context: __SerdeCon
27832807
} as any;
27842808
};
27852809

2810+
const deserializeAws_restJson1PixelAnomaly = (output: any, context: __SerdeContext): PixelAnomaly => {
2811+
return {
2812+
Color: __expectString(output.Color),
2813+
TotalPercentageArea: __limitedParseFloat32(output.TotalPercentageArea),
2814+
} as any;
2815+
};
2816+
27862817
const deserializeAws_restJson1ProjectDescription = (output: any, context: __SerdeContext): ProjectDescription => {
27872818
return {
27882819
CreationTimestamp:

0 commit comments

Comments
 (0)