diff --git a/models/public/Sphereface/accuracy-check.yml b/models/public/Sphereface/accuracy-check.yml index 8c95be00d41..024365cf17a 100644 --- a/models/public/Sphereface/accuracy-check.yml +++ b/models/public/Sphereface/accuracy-check.yml @@ -5,6 +5,13 @@ models: - framework: openvino adapter: reid + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 112, 96 + adapter: reid + datasets: - name: lfw diff --git a/models/public/aclnet-int8/accuracy-check.yml b/models/public/aclnet-int8/accuracy-check.yml index 1af35ab2bc9..f045bfaad2b 100644 --- a/models/public/aclnet-int8/accuracy-check.yml +++ b/models/public/aclnet-int8/accuracy-check.yml @@ -5,6 +5,13 @@ models: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: result.1 + type: INPUT + shape: 1, 1, 16000 + adapter: classification + datasets: - name: sound_classification diff --git a/models/public/aclnet/accuracy-check.yml b/models/public/aclnet/accuracy-check.yml index 21f6089f172..8dd79fa55f4 100644 --- a/models/public/aclnet/accuracy-check.yml +++ b/models/public/aclnet/accuracy-check.yml @@ -5,6 +5,13 @@ models: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 1, 1, 16000 + adapter: classification + datasets: - name: sound_classification diff --git a/models/public/alexnet/accuracy-check.yml b/models/public/alexnet/accuracy-check.yml index 6b19ac44343..17ff0cbac69 100644 --- a/models/public/alexnet/accuracy-check.yml +++ b/models/public/alexnet/accuracy-check.yml @@ -28,6 +28,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 227, 227 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/anti-spoof-mn3/accuracy-check.yml b/models/public/anti-spoof-mn3/accuracy-check.yml index f764b120cad..240e3bd210c 100644 --- a/models/public/anti-spoof-mn3/accuracy-check.yml +++ b/models/public/anti-spoof-mn3/accuracy-check.yml @@ -5,6 +5,13 @@ models: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: actual_input_1 + type: INPUT + shape: 3, 128, 128 + adapter: classification + datasets: - name: antispoof_test_data diff --git a/models/public/background-matting-mobilenetv2/accuracy-check.yml b/models/public/background-matting-mobilenetv2/accuracy-check.yml index 724c3fb72a7..da31a52d50d 100644 --- a/models/public/background-matting-mobilenetv2/accuracy-check.yml +++ b/models/public/background-matting-mobilenetv2/accuracy-check.yml @@ -15,6 +15,21 @@ models: type: INPUT value: bgr* + - framework: opencv + adapter: + type: background_matting_with_pha_and_fgr + alpha_out: pha + foreground_out: fgr + inputs: + - name: src + type: INPUT + value: com* + shape: 3, 720, 1280 + - name: bgr + type: INPUT + value: bgr* + shape: 3, 720, 1280 + datasets: - name: HumanMattingClips120 reader: pillow_imread diff --git a/models/public/bert-base-ner/accuracy-check.yml b/models/public/bert-base-ner/accuracy-check.yml index 12fb17e5109..ba5f9d4ddbb 100644 --- a/models/public/bert-base-ner/accuracy-check.yml +++ b/models/public/bert-base-ner/accuracy-check.yml @@ -13,6 +13,22 @@ models: - name: 'token_type_ids' type: INPUT value: 'segment_ids' + - framework: opencv + inputs: + - name: "input_ids" + type: INPUT + value: "input_ids" + shape: 128 + - name: "attention_mask" + type: INPUT + value: 'input_mask' + shape: 128 + - name: 'token_type_ids' + type: INPUT + value: 'segment_ids' 
+ shape: 128 + adapter: bert_ner + datasets: - name: CONLL2003_bert_cased reader: diff --git a/models/public/brain-tumor-segmentation-0001/accuracy-check.yml b/models/public/brain-tumor-segmentation-0001/accuracy-check.yml index c6a1ea1755b..cef22b2ae38 100644 --- a/models/public/brain-tumor-segmentation-0001/accuracy-check.yml +++ b/models/public/brain-tumor-segmentation-0001/accuracy-check.yml @@ -7,6 +7,15 @@ models: type: brain_tumor_segmentation make_argmax: True + - framework: opencv + inputs: + - name: data_crop + type: INPUT + shape: 4, 128, 128, 128 + adapter: + type: brain_tumor_segmentation + make_argmax: True + datasets: - name: BraTS diff --git a/models/public/brain-tumor-segmentation-0002/accuracy-check.yml b/models/public/brain-tumor-segmentation-0002/accuracy-check.yml index 2c9b4367965..50cd420bc42 100644 --- a/models/public/brain-tumor-segmentation-0002/accuracy-check.yml +++ b/models/public/brain-tumor-segmentation-0002/accuracy-check.yml @@ -5,6 +5,13 @@ models: - framework: openvino adapter: type: brain_tumor_segmentation + - framework: opencv + inputs: + - name: '0' + type: INPUT + shape: 4, 128, 128, 128 + adapter: + type: brain_tumor_segmentation datasets: - name: BraTS_2017 diff --git a/models/public/caffenet/accuracy-check.yml b/models/public/caffenet/accuracy-check.yml index c1177439159..5e1b1e45485 100644 --- a/models/public/caffenet/accuracy-check.yml +++ b/models/public/caffenet/accuracy-check.yml @@ -28,6 +28,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 227, 227 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/cocosnet/accuracy-check.yml b/models/public/cocosnet/accuracy-check.yml index c515a05119d..154411be571 100644 --- a/models/public/cocosnet/accuracy-check.yml +++ b/models/public/cocosnet/accuracy-check.yml @@ -22,6 +22,21 @@ evaluations: type: INPUT value: "(.*)ADE(.*)train(.*).png" + - framework: opencv + inputs: + - name: "input_seg_map" + type: INPUT + value: "(.*)ADE(.*)val(.*).png" + shape: 151, 256, 256 + - name: "ref_image" + type: INPUT + value: "(.*)train(.*).jpg" + shape: 3, 256, 256 + - name: "ref_seg_map" + type: INPUT + value: "(.*)ADE(.*)train(.*).png" + shape: 151, 256, 256 + datasets: - name: cocosnet_image_translation diff --git a/models/public/colorization-siggraph/accuracy-check.yml b/models/public/colorization-siggraph/accuracy-check.yml index abdf68101cd..daa5631e497 100644 --- a/models/public/colorization-siggraph/accuracy-check.yml +++ b/models/public/colorization-siggraph/accuracy-check.yml @@ -15,6 +15,24 @@ models: type: CONST_INPUT value: 0 shape: [1, 1, 256, 256] + - framework: opencv + inputs: + - name: data_l + type: INPUT + shape: 1, 256, 256 + - name: "user_ab" + type: CONST_INPUT + value: 0 + shape: [2, 256, 256] + - name: "user_map" + type: CONST_INPUT + value: 0 + shape: [1, 256, 256] + adapter: + type: image_processing + std: 1 + cast_to_uint8: false + datasets: - name: imagenet_colorization subsample_size: 2000 diff --git a/models/public/colorization-v2/accuracy-check.yml b/models/public/colorization-v2/accuracy-check.yml index 1218649835b..f3aa27c7dc4 100644 --- a/models/public/colorization-v2/accuracy-check.yml +++ b/models/public/colorization-v2/accuracy-check.yml @@ -6,6 +6,16 @@ models: type: image_processing std: 1 cast_to_uint8: false + - framework: opencv + inputs: + - name: data_l + type: INPUT + shape: 1, 256, 256 + adapter: + type: image_processing + std: 1 + 
cast_to_uint8: false + datasets: - name: imagenet_colorization subsample_size: 2000 diff --git a/models/public/common-sign-language-0001/accuracy-check.yml b/models/public/common-sign-language-0001/accuracy-check.yml index eb871584f21..dbcd3858764 100644 --- a/models/public/common-sign-language-0001/accuracy-check.yml +++ b/models/public/common-sign-language-0001/accuracy-check.yml @@ -9,6 +9,14 @@ models: name: input layout: NCDHW + - framework: opencv + inputs: + - name: input + type: INPUT + layout: NCDHW + shape: 3, 8, 224, 224 + adapter: classification + datasets: - name: jester diff --git a/models/public/ctdet_coco_dlav0_512/accuracy-check.yml b/models/public/ctdet_coco_dlav0_512/accuracy-check.yml index d6e682a9dd0..c9b02b5398e 100644 --- a/models/public/ctdet_coco_dlav0_512/accuracy-check.yml +++ b/models/public/ctdet_coco_dlav0_512/accuracy-check.yml @@ -8,6 +8,16 @@ models: width_height_out: width_height regression_out: regression + - framework: opencv + inputs: + - name: input.1 + type: INPUT + shape: 3, 512, 512 + adapter: + type: ctdet + center_heatmap_out: center_heatmap + width_height_out: width_height + regression_out: regression datasets: - name: ms_coco_detection_80_class_without_background diff --git a/models/public/ctpn/accuracy-check.yml b/models/public/ctpn/accuracy-check.yml index 1e62ca1460f..fe8a1d9de22 100644 --- a/models/public/ctpn/accuracy-check.yml +++ b/models/public/ctpn/accuracy-check.yml @@ -8,6 +8,17 @@ models: bbox_pred_out: 'rpn_bbox_pred/Reshape_1' allow_reshape_input: True + - framework: opencv + inputs: + - name: Placeholder + type: INPUT + shape: 3, 600, 600 + adapter: + type: ctpn_text_detection + cls_prob_out: 'Reshape_2' + bbox_pred_out: 'rpn_bbox_pred/Reshape_1' + allow_reshape_input: True + datasets: - name: ICDAR2013_detection diff --git a/models/public/deblurgan-v2/accuracy-check.yml b/models/public/deblurgan-v2/accuracy-check.yml index 7e8543a1e29..01cf794787a 100644 --- a/models/public/deblurgan-v2/accuracy-check.yml +++ b/models/public/deblurgan-v2/accuracy-check.yml @@ -41,6 +41,14 @@ models: adapter: type: image_processing reverse_channels: True + - framework: opencv + inputs: + - name: blur_image + type: INPUT + shape: 3, 736, 1312 + adapter: + type: image_processing + reverse_channels: True datasets: - name: GoPro diff --git a/models/public/deeplabv3/accuracy-check.yml b/models/public/deeplabv3/accuracy-check.yml index af1d173a2d1..72555502e77 100644 --- a/models/public/deeplabv3/accuracy-check.yml +++ b/models/public/deeplabv3/accuracy-check.yml @@ -4,6 +4,13 @@ models: - framework: openvino adapter: segmentation + - framework: opencv + inputs: + - name: mul_1/placeholder_port_1 + type: INPUT + shape: 3, 513, 513 + adapter: segmentation + datasets: - name: VOC2012_Segmentation preprocessing: diff --git a/models/public/densenet-121-tf/accuracy-check.yml b/models/public/densenet-121-tf/accuracy-check.yml index 75f30202944..6eca21b760a 100644 --- a/models/public/densenet-121-tf/accuracy-check.yml +++ b/models/public/densenet-121-tf/accuracy-check.yml @@ -4,6 +4,13 @@ models: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input_1 + type: INPUT + shape: 3, 224, 224 + adapter: classification + datasets: - name: imagenet_1000_classes preprocessing: diff --git a/models/public/densenet-121/accuracy-check.yml b/models/public/densenet-121/accuracy-check.yml index 02b33128f39..44b9040e5f3 100644 --- a/models/public/densenet-121/accuracy-check.yml +++ b/models/public/densenet-121/accuracy-check.yml @@ -29,6 
+29,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/detr-resnet50/accuracy-check.yml b/models/public/detr-resnet50/accuracy-check.yml index fea901afce7..aca1031918f 100644 --- a/models/public/detr-resnet50/accuracy-check.yml +++ b/models/public/detr-resnet50/accuracy-check.yml @@ -33,6 +33,15 @@ models: type: detr scores_out: "scores" boxes_out: "boxes" + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 800, 1137 + adapter: + type: detr + scores_out: "scores" + boxes_out: "boxes" datasets: - name: ms_coco_detection_91_classes reader: pillow_imread diff --git a/models/public/dla-34/accuracy-check.yml b/models/public/dla-34/accuracy-check.yml index a878910530a..79b1124f9ac 100644 --- a/models/public/dla-34/accuracy-check.yml +++ b/models/public/dla-34/accuracy-check.yml @@ -41,6 +41,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/drn-d-38/accuracy-check.yml b/models/public/drn-d-38/accuracy-check.yml index 812b6bd0e67..835ecb65093 100644 --- a/models/public/drn-d-38/accuracy-check.yml +++ b/models/public/drn-d-38/accuracy-check.yml @@ -5,6 +5,15 @@ models: adapter: type: segmentation make_argmax: true + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 1024, 2048 + adapter: + type: segmentation + make_argmax: true + datasets: - name: cityscapes metrics: diff --git a/models/public/efficientdet-d0-tf/accuracy-check.yml b/models/public/efficientdet-d0-tf/accuracy-check.yml index be4a693f90a..8428eaba8a0 100644 --- a/models/public/efficientdet-d0-tf/accuracy-check.yml +++ b/models/public/efficientdet-d0-tf/accuracy-check.yml @@ -3,6 +3,13 @@ models: launchers: - framework: openvino adapter: ssd + - framework: opencv + inputs: + - name: image_arrays/placeholder_port_0 + type: INPUT + shape: 3, 512, 512 + adapter: ssd + datasets: - name: ms_coco_detection_90_class_without_background preprocessing: diff --git a/models/public/efficientdet-d1-tf/accuracy-check.yml b/models/public/efficientdet-d1-tf/accuracy-check.yml index 5b8db5405cf..cf8f4d5dca8 100644 --- a/models/public/efficientdet-d1-tf/accuracy-check.yml +++ b/models/public/efficientdet-d1-tf/accuracy-check.yml @@ -3,6 +3,13 @@ models: launchers: - framework: openvino adapter: ssd + - framework: opencv + inputs: + - name: image_arrays/placeholder_port_0 + type: INPUT + shape: 3, 640, 640 + adapter: ssd + datasets: - name: ms_coco_detection_90_class_without_background preprocessing: diff --git a/models/public/efficientnet-b0-pytorch/accuracy-check.yml b/models/public/efficientnet-b0-pytorch/accuracy-check.yml index 9bbf5458869..2d030794bea 100644 --- a/models/public/efficientnet-b0-pytorch/accuracy-check.yml +++ b/models/public/efficientnet-b0-pytorch/accuracy-check.yml @@ -9,8 +9,6 @@ models: - name: data type: INPUT shape: 1,3,224,224 - - datasets: - name: imagenet_1000_classes reader: pillow_imread @@ -44,6 +42,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/efficientnet-b0/accuracy-check.yml 
b/models/public/efficientnet-b0/accuracy-check.yml index 3c9b7166008..fbea5ae9bf7 100644 --- a/models/public/efficientnet-b0/accuracy-check.yml +++ b/models/public/efficientnet-b0/accuracy-check.yml @@ -37,6 +37,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: sub/placeholder_port_0 + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/efficientnet-v2-b0/accuracy-check.yml b/models/public/efficientnet-v2-b0/accuracy-check.yml index 160df96100a..2f3551ab03a 100644 --- a/models/public/efficientnet-v2-b0/accuracy-check.yml +++ b/models/public/efficientnet-v2-b0/accuracy-check.yml @@ -40,6 +40,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/efficientnet-v2-s/accuracy-check.yml b/models/public/efficientnet-v2-s/accuracy-check.yml index 9b1b2537067..74b2024bd04 100644 --- a/models/public/efficientnet-v2-s/accuracy-check.yml +++ b/models/public/efficientnet-v2-s/accuracy-check.yml @@ -40,6 +40,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 384, 384 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/f3net/accuracy-check.yml b/models/public/f3net/accuracy-check.yml index da666ca99f5..eba90eb5f78 100644 --- a/models/public/f3net/accuracy-check.yml +++ b/models/public/f3net/accuracy-check.yml @@ -27,6 +27,12 @@ models: launchers: - framework: openvino adapter: salient_object_detection + - framework: opencv + inputs: + - name: input.1 + type: INPUT + shape: 3, 352, 352 + adapter: salient_object_detection datasets: - name: PASCAL-S preprocessing: diff --git a/models/public/face-detection-retail-0044/accuracy-check.yml b/models/public/face-detection-retail-0044/accuracy-check.yml index 6b81cf4141d..c9414bd7a30 100644 --- a/models/public/face-detection-retail-0044/accuracy-check.yml +++ b/models/public/face-detection-retail-0044/accuracy-check.yml @@ -10,6 +10,13 @@ models: - framework: openvino adapter: ssd + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 300, 300 + adapter: ssd + datasets: - name: wider diff --git a/models/public/face-recognition-resnet100-arcface-onnx/accuracy-check.yml b/models/public/face-recognition-resnet100-arcface-onnx/accuracy-check.yml index 546c372df4e..3393a83e708 100644 --- a/models/public/face-recognition-resnet100-arcface-onnx/accuracy-check.yml +++ b/models/public/face-recognition-resnet100-arcface-onnx/accuracy-check.yml @@ -5,6 +5,13 @@ models: - framework: openvino adapter: reid + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 112, 112 + adapter: reid + datasets: - name: lfw_bin diff --git a/models/public/faceboxes-pytorch/accuracy-check.yml b/models/public/faceboxes-pytorch/accuracy-check.yml index 3498afa3be7..ce9f758e85b 100644 --- a/models/public/faceboxes-pytorch/accuracy-check.yml +++ b/models/public/faceboxes-pytorch/accuracy-check.yml @@ -8,6 +8,16 @@ models: boxes_out: "boxes" scores_out: "scores" + - framework: opencv + inputs: + - name: input.1 + type: INPUT + shape: 3, 1024, 1024 + adapter: + type: faceboxes + boxes_out: "boxes" + scores_out: "scores" + datasets: - name: wider diff --git 
a/models/public/facenet-20180408-102900/accuracy-check.yml b/models/public/facenet-20180408-102900/accuracy-check.yml index a71448b6acd..df7675d45d0 100644 --- a/models/public/facenet-20180408-102900/accuracy-check.yml +++ b/models/public/facenet-20180408-102900/accuracy-check.yml @@ -7,6 +7,15 @@ models: type: reid joining_method: concatenation + - framework: opencv + inputs: + - name: image_batch/placeholder_port_0 + type: INPUT + shape: 3, 160, 160 + adapter: + type: reid + joining_method: concatenation + datasets: - name: lfw_mtcnn_align diff --git a/models/public/fast-neural-style-mosaic-onnx/accuracy-check.yml b/models/public/fast-neural-style-mosaic-onnx/accuracy-check.yml index 5985e84d684..85200e0af57 100644 --- a/models/public/fast-neural-style-mosaic-onnx/accuracy-check.yml +++ b/models/public/fast-neural-style-mosaic-onnx/accuracy-check.yml @@ -10,6 +10,14 @@ models: adapter: type: style_transfer + - framework: opencv + inputs: + - name: input1 + type: INPUT + shape: 3, 224, 224 + adapter: + type: style_transfer + datasets: - name: ms_coco_style_transfer preprocessing: diff --git a/models/public/faster_rcnn_inception_resnet_v2_atrous_coco/accuracy-check.yml b/models/public/faster_rcnn_inception_resnet_v2_atrous_coco/accuracy-check.yml index c66a1969bca..611e85fe491 100644 --- a/models/public/faster_rcnn_inception_resnet_v2_atrous_coco/accuracy-check.yml +++ b/models/public/faster_rcnn_inception_resnet_v2_atrous_coco/accuracy-check.yml @@ -8,6 +8,17 @@ models: type: CONST_INPUT value: [[600, 1024, 1]] + - framework: opencv + inputs: + - name: image_info + type: CONST_INPUT + value: [[600, 1024, 1]] + shape: 3 + - name: image_tensor + type: INPUT + shape: 3, 600, 1024 + adapter: ssd + datasets: - name: ms_coco_detection_91_classes metrics: diff --git a/models/public/faster_rcnn_resnet50_coco/accuracy-check.yml b/models/public/faster_rcnn_resnet50_coco/accuracy-check.yml index 9752c8220f8..d80d473a00f 100644 --- a/models/public/faster_rcnn_resnet50_coco/accuracy-check.yml +++ b/models/public/faster_rcnn_resnet50_coco/accuracy-check.yml @@ -1,5 +1,5 @@ models: - - name: faster_rcnn_resnet50_coco + - name: faster_rcnn_resnet50_coco launchers: - framework: openvino adapter: ssd @@ -8,6 +8,17 @@ models: type: CONST_INPUT value: [[600, 1024, 1]] + - framework: opencv + inputs: + - name: image_info + type: CONST_INPUT + value: [[600, 1024, 1]] + shape: 3 + - name: image_tensor + type: INPUT + shape: 3, 600, 1024 + adapter: ssd + datasets: - name: ms_coco_detection_91_classes metrics: diff --git a/models/public/fastseg-large/accuracy-check.yml b/models/public/fastseg-large/accuracy-check.yml index 342ffd0fee2..5bd68eda82d 100644 --- a/models/public/fastseg-large/accuracy-check.yml +++ b/models/public/fastseg-large/accuracy-check.yml @@ -6,6 +6,15 @@ models: type: segmentation make_argmax: True + - framework: opencv + inputs: + - name: input0 + type: INPUT + shape: 3, 1024, 2048 + adapter: + type: segmentation + make_argmax: true + datasets: - name: cityscapes preprocessing: diff --git a/models/public/fastseg-small/accuracy-check.yml b/models/public/fastseg-small/accuracy-check.yml index f6bf954228c..9b06cc3dcf6 100644 --- a/models/public/fastseg-small/accuracy-check.yml +++ b/models/public/fastseg-small/accuracy-check.yml @@ -6,6 +6,15 @@ models: type: segmentation make_argmax: True + - framework: opencv + inputs: + - name: input0 + type: INPUT + shape: 3, 1024, 2048 + adapter: + type: segmentation + make_argmax: True + datasets: - name: cityscapes preprocessing: diff --git 
a/models/public/fbcnn/accuracy-check.yml b/models/public/fbcnn/accuracy-check.yml index f1f1a7af9ef..8f378a5cf53 100644 --- a/models/public/fbcnn/accuracy-check.yml +++ b/models/public/fbcnn/accuracy-check.yml @@ -8,6 +8,16 @@ models: reverse_channels: True allow_reshape_input: True + - framework: opencv + inputs: + - name: image_lq + type: INPUT + shape: 3, 512, 512 + adapter: + type: image_processing + reverse_channels: True + allow_reshape_input: True + datasets: - name: LIVE_1 preprocessing: diff --git a/models/public/fcrn-dp-nyu-depth-v2-tf/accuracy-check.yml b/models/public/fcrn-dp-nyu-depth-v2-tf/accuracy-check.yml index 4418b5e1314..4cffbba1d69 100644 --- a/models/public/fcrn-dp-nyu-depth-v2-tf/accuracy-check.yml +++ b/models/public/fcrn-dp-nyu-depth-v2-tf/accuracy-check.yml @@ -3,6 +3,13 @@ models: launchers: - framework: openvino adapter: mono_depth + - framework: opencv + inputs: + - name: Placeholder + type: INPUT + shape: 3, 228, 304 + adapter: mono_depth + datasets: - name: NYU_Depth_V2 preprocessing: diff --git a/models/public/forward-tacotron/forward-tacotron-duration-prediction/accuracy-check.yml b/models/public/forward-tacotron/forward-tacotron-duration-prediction/accuracy-check.yml index ea52c9b318f..9c2a704d500 100644 --- a/models/public/forward-tacotron/forward-tacotron-duration-prediction/accuracy-check.yml +++ b/models/public/forward-tacotron/forward-tacotron-duration-prediction/accuracy-check.yml @@ -7,6 +7,17 @@ models: outputs: - duration - embeddings + - framework: opencv + inputs: + - name: input_seq + type: INPUT + shape: 241 + adapter: + type: multi_output_regression + outputs: + - duration + - embeddings + datasets: - name: tacotron-io-duration-prediction metrics: diff --git a/models/public/forward-tacotron/forward-tacotron-regression/accuracy-check.yml b/models/public/forward-tacotron/forward-tacotron-regression/accuracy-check.yml index c3b90641fad..747845c04ae 100644 --- a/models/public/forward-tacotron/forward-tacotron-regression/accuracy-check.yml +++ b/models/public/forward-tacotron/forward-tacotron-regression/accuracy-check.yml @@ -10,6 +10,18 @@ models: - name: 'data' type: INPUT layout: NHWC + + - framework: opencv + inputs: + - name: 'data' + type: INPUT + layout: NHWC + shape: 805, 512 + adapter: + type: multi_output_regression + outputs: + - mel + datasets: - name: tacotron-io-regression metrics: diff --git a/models/public/gmcnn-places2-tf/accuracy-check.yml b/models/public/gmcnn-places2-tf/accuracy-check.yml index 51b1e730d44..ab06358a2b9 100644 --- a/models/public/gmcnn-places2-tf/accuracy-check.yml +++ b/models/public/gmcnn-places2-tf/accuracy-check.yml @@ -40,6 +40,17 @@ models: - name: "Placeholder_1" type: INPUT value: '.*mask' + - framework: opencv + inputs: + - name: "Placeholder" + type: INPUT + value: '.*image' + shape: 3, 512, 680 + - name: "Placeholder_1" + type: INPUT + value: '.*mask' + shape: 1, 512, 680 + adapter: inpainting datasets: - name: inpainting_rect_masks diff --git a/models/public/googlenet-v1-tf/accuracy-check.yml b/models/public/googlenet-v1-tf/accuracy-check.yml index 8c55a06c1bc..3c8d41b9e3f 100644 --- a/models/public/googlenet-v1-tf/accuracy-check.yml +++ b/models/public/googlenet-v1-tf/accuracy-check.yml @@ -30,6 +30,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1001_classes diff --git a/models/public/googlenet-v1/accuracy-check.yml 
b/models/public/googlenet-v1/accuracy-check.yml index 4261c27bdb7..cdfad0aeb78 100644 --- a/models/public/googlenet-v1/accuracy-check.yml +++ b/models/public/googlenet-v1/accuracy-check.yml @@ -29,6 +29,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/googlenet-v2-tf/accuracy-check.yml b/models/public/googlenet-v2-tf/accuracy-check.yml index a75503465d9..27ea7fbec9d 100644 --- a/models/public/googlenet-v2-tf/accuracy-check.yml +++ b/models/public/googlenet-v2-tf/accuracy-check.yml @@ -4,6 +4,13 @@ models: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 224, 224 + adapter: classification + datasets: - name: imagenet_1001_classes preprocessing: diff --git a/models/public/googlenet-v2/accuracy-check.yml b/models/public/googlenet-v2/accuracy-check.yml index 75000cb3963..85bf20f7757 100644 --- a/models/public/googlenet-v2/accuracy-check.yml +++ b/models/public/googlenet-v2/accuracy-check.yml @@ -29,6 +29,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes_2015 diff --git a/models/public/googlenet-v3-pytorch/accuracy-check.yml b/models/public/googlenet-v3-pytorch/accuracy-check.yml index 9b0cf77d447..7f96d3eb8de 100644 --- a/models/public/googlenet-v3-pytorch/accuracy-check.yml +++ b/models/public/googlenet-v3-pytorch/accuracy-check.yml @@ -43,6 +43,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 299, 299 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/googlenet-v3/accuracy-check.yml b/models/public/googlenet-v3/accuracy-check.yml index 2ba0a987692..92dd34f6a0f 100644 --- a/models/public/googlenet-v3/accuracy-check.yml +++ b/models/public/googlenet-v3/accuracy-check.yml @@ -31,6 +31,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 299, 299 + adapter: classification datasets: - name: imagenet_1001_classes diff --git a/models/public/googlenet-v4-tf/accuracy-check.yml b/models/public/googlenet-v4-tf/accuracy-check.yml index 48afb171682..8a3c5cde784 100644 --- a/models/public/googlenet-v4-tf/accuracy-check.yml +++ b/models/public/googlenet-v4-tf/accuracy-check.yml @@ -30,6 +30,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 299, 299 + adapter: classification datasets: - name: imagenet_1001_classes diff --git a/models/public/gpt-2/accuracy-check.yml b/models/public/gpt-2/accuracy-check.yml index 03034b3edd5..73e9d6cdefd 100644 --- a/models/public/gpt-2/accuracy-check.yml +++ b/models/public/gpt-2/accuracy-check.yml @@ -34,6 +34,16 @@ models: type: INPUT value: "input_ids" precision: I32 + - framework: opencv + inputs: + - name: "input" + type: INPUT + value: "input_ids" + precision: I32 + shape: 1024 + adapter: + type: common_language_modeling + logits_output: "output" datasets: - name: WikiText_2_raw_gpt2 diff --git a/models/public/hbonet-0.25/accuracy-check.yml b/models/public/hbonet-0.25/accuracy-check.yml index 6524903a39c..0e1d0bee03a 
100644 --- a/models/public/hbonet-0.25/accuracy-check.yml +++ b/models/public/hbonet-0.25/accuracy-check.yml @@ -48,6 +48,12 @@ models: - framework: openvino adapter: type: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/hbonet-1.0/accuracy-check.yml b/models/public/hbonet-1.0/accuracy-check.yml index 068534a5d68..728e41a38f4 100644 --- a/models/public/hbonet-1.0/accuracy-check.yml +++ b/models/public/hbonet-1.0/accuracy-check.yml @@ -48,6 +48,12 @@ models: - framework: openvino adapter: type: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/higher-hrnet-w32-human-pose-estimation/accuracy-check.yml b/models/public/higher-hrnet-w32-human-pose-estimation/accuracy-check.yml index 0c9a830e22f..fb6ebc62c2c 100644 --- a/models/public/higher-hrnet-w32-human-pose-estimation/accuracy-check.yml +++ b/models/public/higher-hrnet-w32-human-pose-estimation/accuracy-check.yml @@ -10,6 +10,18 @@ models: heatmaps_out: heatmaps nms_heatmaps_out: heatmaps + - framework: opencv + inputs: + - name: image + type: INPUT + shape: 3, 512, 512 + allow_reshape_input: true + adapter: + type: human_pose_estimation_hrnet + embeddings_out: embeddings + heatmaps_out: heatmaps + nms_heatmaps_out: heatmaps + datasets: - name: ms_coco_val2017_keypoints_5k_images diff --git a/models/public/hrnet-v2-c1-segmentation/accuracy-check.yml b/models/public/hrnet-v2-c1-segmentation/accuracy-check.yml index fcc0ee6b590..af8cb928fec 100644 --- a/models/public/hrnet-v2-c1-segmentation/accuracy-check.yml +++ b/models/public/hrnet-v2-c1-segmentation/accuracy-check.yml @@ -38,6 +38,12 @@ models: launchers: - framework: openvino adapter: segmentation + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 320, 320 + adapter: segmentation datasets: - name: ADEChallengeData2016 diff --git a/models/public/human-pose-estimation-3d-0001/accuracy-check.yml b/models/public/human-pose-estimation-3d-0001/accuracy-check.yml index 83d04519ece..f3fa4135931 100644 --- a/models/public/human-pose-estimation-3d-0001/accuracy-check.yml +++ b/models/public/human-pose-estimation-3d-0001/accuracy-check.yml @@ -10,6 +10,18 @@ models: keypoints_heatmap_out: heatmaps part_affinity_fields_out: pafs + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 256, 448 + adapter: + type: human_pose_estimation_3d + features_3d_out: features + keypoints_heatmap_out: heatmaps + part_affinity_fields_out: pafs + allow_reshape_input: true + datasets: - name: cmu_panoptic_keypoints diff --git a/models/public/hybrid-cs-model-mri/accuracy-check.yml b/models/public/hybrid-cs-model-mri/accuracy-check.yml index 04641f97a79..fe8c1d381e1 100644 --- a/models/public/hybrid-cs-model-mri/accuracy-check.yml +++ b/models/public/hybrid-cs-model-mri/accuracy-check.yml @@ -8,6 +8,16 @@ models: std: 1 cast_to_uint8: False + - framework: opencv + inputs: + - name: input_1 + type: INPUT + shape: 2, 256, 256 + adapter: + type: image_processing + std: 1 + cast_to_uint8: False + datasets: - name: CalgaryCampinasSingleChannel_20perc_downsample postprocessing: diff --git a/models/public/i3d-rgb-tf/accuracy-check.yml b/models/public/i3d-rgb-tf/accuracy-check.yml index f7bef288bfd..22ea3fdd15c 100644 --- a/models/public/i3d-rgb-tf/accuracy-check.yml +++ 
b/models/public/i3d-rgb-tf/accuracy-check.yml @@ -8,6 +8,14 @@ models: type: INPUT layout: NDHWC + - framework: opencv + inputs: + - name: Placeholder + type: INPUT + layout: NCDHW + shape: 3, 79, 224, 224 + adapter: classification + datasets: - name: kinetics-400-frames-79-400 data_source: kinetics/frames_val diff --git a/models/public/inception-resnet-v2-tf/accuracy-check.yml b/models/public/inception-resnet-v2-tf/accuracy-check.yml index 785f8a702af..b8dd47091d5 100644 --- a/models/public/inception-resnet-v2-tf/accuracy-check.yml +++ b/models/public/inception-resnet-v2-tf/accuracy-check.yml @@ -31,6 +31,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 299, 299 + adapter: classification datasets: - name: imagenet_1001_classes diff --git a/models/public/license-plate-recognition-barrier-0007/accuracy-check.yml b/models/public/license-plate-recognition-barrier-0007/accuracy-check.yml index 5b91c81ddc4..1571d7a3e80 100644 --- a/models/public/license-plate-recognition-barrier-0007/accuracy-check.yml +++ b/models/public/license-plate-recognition-barrier-0007/accuracy-check.yml @@ -5,6 +5,13 @@ models: - framework: openvino adapter: lpr + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 24, 94 + adapter: lpr + datasets: - name: synthetic_chinese_license_plates diff --git a/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/accuracy-check.yml b/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/accuracy-check.yml index 2385055d28e..0eb7cab42e4 100644 --- a/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/accuracy-check.yml +++ b/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/accuracy-check.yml @@ -12,6 +12,20 @@ models: type: CONST_INPUT value: [[800, 1365, 1]] + - framework: opencv + inputs: + - name: image_info + type: CONST_INPUT + value: [[800, 1365, 1]] + shape: 3 + - name: image_tensor + type: INPUT + shape: 3, 800, 1365 + adapter: + type: mask_rcnn + detection_out: reshape_do_2d + raw_masks_out: masks + datasets: - name: ms_coco_mask_rcnn_short_91_classes diff --git a/models/public/mask_rcnn_resnet50_atrous_coco/accuracy-check.yml b/models/public/mask_rcnn_resnet50_atrous_coco/accuracy-check.yml index 14cedf3b104..84a05ac86f0 100644 --- a/models/public/mask_rcnn_resnet50_atrous_coco/accuracy-check.yml +++ b/models/public/mask_rcnn_resnet50_atrous_coco/accuracy-check.yml @@ -12,6 +12,20 @@ models: type: CONST_INPUT value: [[800, 1365, 1]] + - framework: opencv + inputs: + - name: image_info + type: CONST_INPUT + value: [[800, 1365, 1]] + shape: 3 + - name: image_tensor + type: INPUT + shape: 3, 800, 1365 + adapter: + type: mask_rcnn + detection_out: reshape_do_2d + raw_masks_out: masks + datasets: - name: ms_coco_mask_rcnn_short_91_classes metrics: diff --git a/models/public/midasnet/accuracy-check.yml b/models/public/midasnet/accuracy-check.yml index b67005a0a06..1707f9727a3 100644 --- a/models/public/midasnet/accuracy-check.yml +++ b/models/public/midasnet/accuracy-check.yml @@ -3,6 +3,13 @@ models: launchers: - framework: openvino adapter: mono_depth + - framework: opencv + inputs: + - name: image + type: INPUT + shape: 3, 384, 384 + adapter: mono_depth + datasets: - name: ReDWeb_V1 preprocessing: diff --git a/models/public/mixnet-l/accuracy-check.yml b/models/public/mixnet-l/accuracy-check.yml index 3f2d3e0c13a..9d4db5f7947 100644 --- a/models/public/mixnet-l/accuracy-check.yml +++ b/models/public/mixnet-l/accuracy-check.yml @@ -37,6 +37,12 
@@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: IteratorGetNext/placeholder_out_port_0 + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/mobilefacedet-v1-mxnet/accuracy-check.yml b/models/public/mobilefacedet-v1-mxnet/accuracy-check.yml index 68abc1577c5..c628356a3af 100644 --- a/models/public/mobilefacedet-v1-mxnet/accuracy-check.yml +++ b/models/public/mobilefacedet-v1-mxnet/accuracy-check.yml @@ -17,6 +17,23 @@ models: type: INPUT layout: NHWC + - framework: opencv + inputs: + - name: data + type: INPUT + layout: NHWC + shape: 256, 256, 3 + adapter: + type: yolo_v3 + classes: 1 + num: 9 + anchors: 10,12,16,20,23,29,43,54,60,75,80,106,118,157,186,248,285,379 + outputs: + - yolov30_yolooutputv32_conv0_fwd/YoloRegion + - yolov30_yolooutputv31_conv0_fwd/YoloRegion + - yolov30_yolooutputv30_conv0_fwd/YoloRegion + anchor_masks: [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + datasets: - name: wider_without_bkgr diff --git a/models/public/mobilenet-ssd/accuracy-check.yml b/models/public/mobilenet-ssd/accuracy-check.yml index 87ad19b01a6..289948c26d9 100644 --- a/models/public/mobilenet-ssd/accuracy-check.yml +++ b/models/public/mobilenet-ssd/accuracy-check.yml @@ -26,6 +26,12 @@ models: launchers: - framework: openvino adapter: ssd + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 300, 300 + adapter: ssd datasets: - name: VOC2007_detection diff --git a/models/public/mobilenet-v1-0.25-128/accuracy-check.yml b/models/public/mobilenet-v1-0.25-128/accuracy-check.yml index 34303bd0281..33a0f67357c 100644 --- a/models/public/mobilenet-v1-0.25-128/accuracy-check.yml +++ b/models/public/mobilenet-v1-0.25-128/accuracy-check.yml @@ -31,6 +31,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 128, 128 + adapter: classification datasets: - name: imagenet_1001_classes diff --git a/models/public/mobilenet-v1-1.0-224-tf/accuracy-check.yml b/models/public/mobilenet-v1-1.0-224-tf/accuracy-check.yml index 6c9a256ca6c..f555cadfb60 100644 --- a/models/public/mobilenet-v1-1.0-224-tf/accuracy-check.yml +++ b/models/public/mobilenet-v1-1.0-224-tf/accuracy-check.yml @@ -31,6 +31,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1001_classes diff --git a/models/public/mobilenet-v1-1.0-224/accuracy-check.yml b/models/public/mobilenet-v1-1.0-224/accuracy-check.yml index ee5d0822316..2885b5d6dec 100644 --- a/models/public/mobilenet-v1-1.0-224/accuracy-check.yml +++ b/models/public/mobilenet-v1-1.0-224/accuracy-check.yml @@ -29,6 +29,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/mobilenet-v2-1.0-224/accuracy-check.yml b/models/public/mobilenet-v2-1.0-224/accuracy-check.yml index 6e9000486dd..5d6b3ed6281 100644 --- a/models/public/mobilenet-v2-1.0-224/accuracy-check.yml +++ b/models/public/mobilenet-v2-1.0-224/accuracy-check.yml @@ -32,6 +32,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - 
name: imagenet_1001_classes diff --git a/models/public/mobilenet-v2-1.4-224/accuracy-check.yml b/models/public/mobilenet-v2-1.4-224/accuracy-check.yml index cf5d000d6dc..e7b3f218cb8 100644 --- a/models/public/mobilenet-v2-1.4-224/accuracy-check.yml +++ b/models/public/mobilenet-v2-1.4-224/accuracy-check.yml @@ -31,6 +31,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1001_classes diff --git a/models/public/mobilenet-v2-pytorch/accuracy-check.yml b/models/public/mobilenet-v2-pytorch/accuracy-check.yml index daea22f4247..c02b41f3f43 100644 --- a/models/public/mobilenet-v2-pytorch/accuracy-check.yml +++ b/models/public/mobilenet-v2-pytorch/accuracy-check.yml @@ -53,6 +53,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/mobilenet-v2/accuracy-check.yml b/models/public/mobilenet-v2/accuracy-check.yml index 37825aa7a4e..43da3bebb3b 100644 --- a/models/public/mobilenet-v2/accuracy-check.yml +++ b/models/public/mobilenet-v2/accuracy-check.yml @@ -29,6 +29,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/mobilenet-v3-large-1.0-224-tf/accuracy-check.yml b/models/public/mobilenet-v3-large-1.0-224-tf/accuracy-check.yml index aec5252b96a..a7e8f2e532f 100644 --- a/models/public/mobilenet-v3-large-1.0-224-tf/accuracy-check.yml +++ b/models/public/mobilenet-v3-large-1.0-224-tf/accuracy-check.yml @@ -28,6 +28,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input_1 + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/mobilenet-v3-small-1.0-224-tf/accuracy-check.yml b/models/public/mobilenet-v3-small-1.0-224-tf/accuracy-check.yml index fd0a1875e1a..458f3656635 100644 --- a/models/public/mobilenet-v3-small-1.0-224-tf/accuracy-check.yml +++ b/models/public/mobilenet-v3-small-1.0-224-tf/accuracy-check.yml @@ -28,6 +28,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input_1 + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/mobilenet-yolo-v4-syg/accuracy-check.yml b/models/public/mobilenet-yolo-v4-syg/accuracy-check.yml index e5ae01f7277..aa2e7b38204 100644 --- a/models/public/mobilenet-yolo-v4-syg/accuracy-check.yml +++ b/models/public/mobilenet-yolo-v4-syg/accuracy-check.yml @@ -49,7 +49,7 @@ models: - type: map integral: 11point ignore_difficult: true - presenter: print_scalar + presenter: print_scalar reference: 0.8711 - name: mobilenet-yolo-v4-syg @@ -67,6 +67,23 @@ models: - separable_conv2d_22/separable_conv2d/YoloRegion - separable_conv2d_30/separable_conv2d/YoloRegion - separable_conv2d_38/separable_conv2d/YoloRegion + - framework: opencv + inputs: + - name: input_1 + type: INPUT + shape: 3, 416, 416 + adapter: + type: yolo_v3 + classes: 4 + anchors: 12,16, 19,36, 40,28, 36,75, 76,55, 72,146, 142,110, 192,243, 459,401 + coords: 4 + num: 9 + anchor_masks: [[0, 1, 2], [3, 4, 5], [6, 7, 
8]] + threshold: 0.001 + outputs: + - separable_conv2d_22/separable_conv2d/YoloRegion + - separable_conv2d_30/separable_conv2d/YoloRegion + - separable_conv2d_38/separable_conv2d/YoloRegion datasets: - name: SYGData0829 preprocessing: diff --git a/models/public/mozilla-deepspeech-0.6.1/accuracy-check.yml b/models/public/mozilla-deepspeech-0.6.1/accuracy-check.yml index 05cb81cfe6e..8336486466e 100644 --- a/models/public/mozilla-deepspeech-0.6.1/accuracy-check.yml +++ b/models/public/mozilla-deepspeech-0.6.1/accuracy-check.yml @@ -13,7 +13,7 @@ models: lm_beta: 1.85 lm_oov_score: -1000. lm_vocabulary_offset: 941235601 - lm_vocabulary_length: 4463723 + lm_vocabulary_length: 4463723 inputs: - name: input_node type: INPUT @@ -24,6 +24,33 @@ models: - name: previous_state_h type: LSTM_INPUT value: 'cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd_1:0' + - framework: opencv + inputs: + - name: input_node + type: INPUT + layout: NHWC + shape: 16, 19, 26 + - name: previous_state_h + type: LSTM_INPUT + value: 'cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd_1:0' + shape: 2048 + - name: previous_state_c + type: LSTM_INPUT + value: 'cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd:0' + shape: 2048 + adapter: + type: ctc_beam_search_decoder_with_lm + probability_out: logits + logarithmic_prob: False + beam_size: 32 + # Use option "accuracy_check [...] --model_attributes " to provide path to lm.binary. + lm_file: mozilla-deepspeech-0.6.1/lm.binary + lm_alpha: 0.75 + lm_beta: 1.85 + lm_oov_score: -1000. + lm_vocabulary_offset: 941235601 + lm_vocabulary_length: 4463723 + datasets: - name: librispeech-test-clean reader: diff --git a/models/public/mozilla-deepspeech-0.8.2/accuracy-check.yml b/models/public/mozilla-deepspeech-0.8.2/accuracy-check.yml index 5b34b9bc42a..a5eaa2b2017 100644 --- a/models/public/mozilla-deepspeech-0.8.2/accuracy-check.yml +++ b/models/public/mozilla-deepspeech-0.8.2/accuracy-check.yml @@ -24,6 +24,33 @@ models: - name: previous_state_h type: LSTM_INPUT value: 'cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd_1:0' + - framework: opencv + inputs: + - name: input_node + type: INPUT + layout: NHWC + shape: 16, 19, 26 + - name: previous_state_h + type: LSTM_INPUT + value: 'cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd_1:0' + shape: 2048 + - name: previous_state_c + type: LSTM_INPUT + value: 'cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd:0' + shape: 2048 + adapter: + type: ctc_beam_search_decoder_with_lm + probability_out: logits + logarithmic_prob: False + beam_size: 32 + + # Use option "accuracy_check [...] --model_attributes " to provide path to deepspeech-0.8.2-models.kenlm + # deepspeech-0.8.2-models.kenlm is generated from deepspeech-0.8.2-models.scorer by scorer_to_kenlm.py script. 
+ lm_file: mozilla-deepspeech-0.8.2/deepspeech-0.8.2-models.kenlm + lm_alpha: 0.93128901720047 + lm_beta: 1.1834137439727783 + lm_vocabulary_offset: 941209108 + datasets: - name: librispeech-test-clean reader: diff --git a/models/public/mtcnn/accuracy-check.yml b/models/public/mtcnn/accuracy-check.yml index 1fff5a5cc10..f3a319cd187 100644 --- a/models/public/mtcnn/accuracy-check.yml +++ b/models/public/mtcnn/accuracy-check.yml @@ -95,6 +95,7 @@ evaluations: - name: data type: INPUT layout: NCWH + shape: 3, 720, 1280 preprocessing: - type: bgr_to_rgb - type: pyramid @@ -109,6 +110,7 @@ evaluations: - name: data type: INPUT layout: NCWH + shape: 3, 24, 24 preprocessing: - type: bgr_to_rgb @@ -120,12 +122,15 @@ evaluations: - name: data type: INPUT layout: NCWH + shape: 3, 48, 48 preprocessing: - type: bgr_to_rgb launchers: - framework: openvino + - framework: opencv + datasets: - name: wider diff --git a/models/public/netvlad-tf/accuracy-check.yml b/models/public/netvlad-tf/accuracy-check.yml index f270a008685..dc2aaeb4a76 100644 --- a/models/public/netvlad-tf/accuracy-check.yml +++ b/models/public/netvlad-tf/accuracy-check.yml @@ -3,6 +3,13 @@ models: launchers: - framework: openvino adapter: reid + - framework: opencv + inputs: + - name: Placeholder + type: INPUT + shape: 3, 200, 300 + adapter: reid + datasets: - name: pitts30k_val reader: pillow_imread diff --git a/models/public/nfnet-f0/accuracy-check.yml b/models/public/nfnet-f0/accuracy-check.yml index 9c7c21e62ff..0ef76f733b9 100644 --- a/models/public/nfnet-f0/accuracy-check.yml +++ b/models/public/nfnet-f0/accuracy-check.yml @@ -38,6 +38,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: image + type: INPUT + shape: 3, 256, 256 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/octave-resnet-26-0.25/accuracy-check.yml b/models/public/octave-resnet-26-0.25/accuracy-check.yml index 548c719eca4..d4466dbabeb 100644 --- a/models/public/octave-resnet-26-0.25/accuracy-check.yml +++ b/models/public/octave-resnet-26-0.25/accuracy-check.yml @@ -47,6 +47,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/open-closed-eye-0001/accuracy-check.yml b/models/public/open-closed-eye-0001/accuracy-check.yml index db4dc8fb918..1a83e5f22ab 100644 --- a/models/public/open-closed-eye-0001/accuracy-check.yml +++ b/models/public/open-closed-eye-0001/accuracy-check.yml @@ -20,6 +20,13 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input.1 + type: INPUT + shape: 3, 32, 32 + adapter: classification + datasets: - name: mrlEyes_2018_01 preprocessing: diff --git a/models/public/pelee-coco/accuracy-check.yml b/models/public/pelee-coco/accuracy-check.yml index 383afea05bd..ed63fdb6996 100644 --- a/models/public/pelee-coco/accuracy-check.yml +++ b/models/public/pelee-coco/accuracy-check.yml @@ -5,6 +5,13 @@ models: - framework: openvino adapter: ssd + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 304, 304 + adapter: ssd + datasets: - name: ms_coco_detection_80_class_with_background diff --git a/models/public/pspnet-pytorch/accuracy-check.yml b/models/public/pspnet-pytorch/accuracy-check.yml index 750e98ab71f..eeb13541bf1 100644 --- a/models/public/pspnet-pytorch/accuracy-check.yml +++ 
b/models/public/pspnet-pytorch/accuracy-check.yml @@ -3,6 +3,13 @@ models: launchers: - framework: openvino adapter: segmentation + - framework: opencv + inputs: + - name: input.1 + type: INPUT + shape: 3, 512, 512 + adapter: segmentation + datasets: - name: VOC2012_Segmentation preprocessing: diff --git a/models/public/quartznet-15x5-en/accuracy-check.yml b/models/public/quartznet-15x5-en/accuracy-check.yml index 5f744ed614b..86db5e68a2f 100644 --- a/models/public/quartznet-15x5-en/accuracy-check.yml +++ b/models/public/quartznet-15x5-en/accuracy-check.yml @@ -11,6 +11,17 @@ models: type: INPUT layout: NHWC + - framework: opencv + inputs: + - name: 'audio_signal' + type: INPUT + layout: NHWC + shape: 64, 128 + allow_reshape_input: true + adapter: + type: ctc_greedy_decoder + alphabet: [' ', a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, ''''] + datasets: - name: LibriSpeech_test_clean_npy_nemo reader: numpy_reader diff --git a/models/public/regnetx-3.2gf/accuracy-check.yml b/models/public/regnetx-3.2gf/accuracy-check.yml index 58bbaa5e321..f436b8165ea 100644 --- a/models/public/regnetx-3.2gf/accuracy-check.yml +++ b/models/public/regnetx-3.2gf/accuracy-check.yml @@ -39,6 +39,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/repvgg-a0/accuracy-check.yml b/models/public/repvgg-a0/accuracy-check.yml index bb103190a0f..5522143d3f7 100644 --- a/models/public/repvgg-a0/accuracy-check.yml +++ b/models/public/repvgg-a0/accuracy-check.yml @@ -38,6 +38,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/repvgg-b1/accuracy-check.yml b/models/public/repvgg-b1/accuracy-check.yml index 5d45311220d..3e3846c2ebf 100644 --- a/models/public/repvgg-b1/accuracy-check.yml +++ b/models/public/repvgg-b1/accuracy-check.yml @@ -38,6 +38,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/repvgg-b3/accuracy-check.yml b/models/public/repvgg-b3/accuracy-check.yml index 52362f2f835..289aed74c91 100644 --- a/models/public/repvgg-b3/accuracy-check.yml +++ b/models/public/repvgg-b3/accuracy-check.yml @@ -38,6 +38,12 @@ models: launchers: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: input + type: INPUT + shape: 3, 224, 224 + adapter: classification datasets: - name: imagenet_1000_classes diff --git a/models/public/resnest-50-pytorch/accuracy-check.yml b/models/public/resnest-50-pytorch/accuracy-check.yml index bf9dd074628..d84803e663b 100644 --- a/models/public/resnest-50-pytorch/accuracy-check.yml +++ b/models/public/resnest-50-pytorch/accuracy-check.yml @@ -5,6 +5,13 @@ models: - framework: openvino adapter: classification + - framework: opencv + inputs: + - name: data + type: INPUT + shape: 3, 224, 224 + adapter: classification + datasets: - name: imagenet_1000_classes reader: pillow_imread diff --git a/models/public/resnet-18-pytorch/accuracy-check.yml b/models/public/resnet-18-pytorch/accuracy-check.yml index 23a35f646ad..95f7a69bf88 100644 --- 
a/models/public/resnet-18-pytorch/accuracy-check.yml
+++ b/models/public/resnet-18-pytorch/accuracy-check.yml
@@ -53,6 +53,12 @@ models:
     launchers:
       - framework: openvino
         adapter: classification
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification

     datasets:
       - name: imagenet_1000_classes
diff --git a/models/public/resnet-34-pytorch/accuracy-check.yml b/models/public/resnet-34-pytorch/accuracy-check.yml
index c6cf08d36fa..afef850a260 100644
--- a/models/public/resnet-34-pytorch/accuracy-check.yml
+++ b/models/public/resnet-34-pytorch/accuracy-check.yml
@@ -45,6 +45,12 @@ models:
     launchers:
       - framework: openvino
         adapter: classification
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification

     datasets:
       - name: imagenet_1000_classes
diff --git a/models/public/resnet-50-pytorch/accuracy-check.yml b/models/public/resnet-50-pytorch/accuracy-check.yml
index 3d606596202..84e89cb55ce 100644
--- a/models/public/resnet-50-pytorch/accuracy-check.yml
+++ b/models/public/resnet-50-pytorch/accuracy-check.yml
@@ -53,6 +53,12 @@ models:
     launchers:
       - framework: openvino
         adapter: classification
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification

     datasets:
       - name: imagenet_1000_classes
diff --git a/models/public/resnet-50-tf/accuracy-check.yml b/models/public/resnet-50-tf/accuracy-check.yml
index ed268e4b21c..121faf76bba 100644
--- a/models/public/resnet-50-tf/accuracy-check.yml
+++ b/models/public/resnet-50-tf/accuracy-check.yml
@@ -33,6 +33,12 @@ models:
     launchers:
       - framework: openvino
         adapter: classification
+      - framework: opencv
+        inputs:
+          - name: map/TensorArrayStack/TensorArrayGatherV3
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification

     datasets:
       - name: imagenet_1001_classes
diff --git a/models/public/retinaface-resnet50-pytorch/accuracy-check.yml b/models/public/retinaface-resnet50-pytorch/accuracy-check.yml
index 27145c7723c..18d3378a773 100644
--- a/models/public/retinaface-resnet50-pytorch/accuracy-check.yml
+++ b/models/public/retinaface-resnet50-pytorch/accuracy-check.yml
@@ -7,6 +7,17 @@ models:
           bboxes_output: face_rpn_bbox_pred
           scores_output: face_rpn_cls_prob
           landmarks_output: face_rpn_landmark_pred
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 640, 640
+        adapter:
+          type: retinaface_pytorch
+          bboxes_output: face_rpn_bbox_pred
+          scores_output: face_rpn_cls_prob
+          landmarks_output: face_rpn_landmark_pred
+
     datasets:
       - name: wider
         preprocessing:
diff --git a/models/public/retinanet-tf/accuracy-check.yml b/models/public/retinanet-tf/accuracy-check.yml
index f26628f7255..7c3e3351a30 100644
--- a/models/public/retinanet-tf/accuracy-check.yml
+++ b/models/public/retinanet-tf/accuracy-check.yml
@@ -4,6 +4,13 @@ models:
       - framework: openvino
         adapter: ssd

+      - framework: opencv
+        inputs:
+          - name: input_1
+            type: INPUT
+            shape: 3, 1333, 1333
+        adapter: ssd
+
     datasets:
       - name: ms_coco_detection_80_class_without_background
         preprocessing:
diff --git a/models/public/rexnet-v1-x1.0/accuracy-check.yml b/models/public/rexnet-v1-x1.0/accuracy-check.yml
index 61e22b47d3e..8b603a7a78d 100644
--- a/models/public/rexnet-v1-x1.0/accuracy-check.yml
+++ b/models/public/rexnet-v1-x1.0/accuracy-check.yml
@@ -41,6 +41,12 @@ models:
     launchers:
       - framework: openvino
         adapter: classification
+      - framework: opencv
+        inputs:
+          - name: input.1
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification

     datasets:
       - name: imagenet_1000_classes
diff --git a/models/public/rfcn-resnet101-coco-tf/accuracy-check.yml b/models/public/rfcn-resnet101-coco-tf/accuracy-check.yml
index ceff159eb0e..f3db1c0ca86 100644
--- a/models/public/rfcn-resnet101-coco-tf/accuracy-check.yml
+++ b/models/public/rfcn-resnet101-coco-tf/accuracy-check.yml
@@ -1,5 +1,5 @@
 models:
-  - name: rfcn-resnet101-coco-tf
+  - name: rfcn-resnet101-coco-tf
     launchers:
       - framework: openvino
         adapter: ssd
@@ -8,6 +8,17 @@ models:
             type: CONST_INPUT
             value: [[600, 600, 1]]

+      - framework: opencv
+        inputs:
+          - name: image_info
+            type: CONST_INPUT
+            value: [[600, 600, 1]]
+            shape: 3
+          - name: image_tensor
+            type: INPUT
+            shape: 3, 600, 600
+        adapter: ssd
+
     datasets:
       - name: ms_coco_detection_91_classes

diff --git a/models/public/se-inception/accuracy-check.yml b/models/public/se-inception/accuracy-check.yml
index 782033753c3..88e654665f6 100644
--- a/models/public/se-inception/accuracy-check.yml
+++ b/models/public/se-inception/accuracy-check.yml
@@ -29,6 +29,12 @@ models:
     launchers:
       - framework: openvino
         adapter: classification
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification

     datasets:
       - name: imagenet_1000_classes_2015
diff --git a/models/public/se-resnet-50/accuracy-check.yml b/models/public/se-resnet-50/accuracy-check.yml
index f6ff5844a7a..81e9c445da7 100644
--- a/models/public/se-resnet-50/accuracy-check.yml
+++ b/models/public/se-resnet-50/accuracy-check.yml
@@ -29,6 +29,12 @@ models:
     launchers:
       - framework: openvino
         adapter: classification
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification

     datasets:
       - name: imagenet_1000_classes_2015
diff --git a/models/public/se-resnext-50/accuracy-check.yml b/models/public/se-resnext-50/accuracy-check.yml
index 34fc9b83c1f..9f564c18e9b 100644
--- a/models/public/se-resnext-50/accuracy-check.yml
+++ b/models/public/se-resnext-50/accuracy-check.yml
@@ -29,6 +29,12 @@ models:
     launchers:
       - framework: openvino
         adapter: classification
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification

     datasets:
       - name: imagenet_1000_classes_2015
diff --git a/models/public/shufflenet-v2-x0.5/accuracy-check.yml b/models/public/shufflenet-v2-x0.5/accuracy-check.yml
index 966ba1f6cae..97aff051939 100644
--- a/models/public/shufflenet-v2-x0.5/accuracy-check.yml
+++ b/models/public/shufflenet-v2-x0.5/accuracy-check.yml
@@ -4,6 +4,13 @@ models:
       - framework: openvino
         adapter: classification

+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification
+
     datasets:
       - name: imagenet_1000_classes
         reader: pillow_imread
diff --git a/models/public/shufflenet-v2-x1.0/accuracy-check.yml b/models/public/shufflenet-v2-x1.0/accuracy-check.yml
index 82815b1c2f6..969a4f1dfcd 100644
--- a/models/public/shufflenet-v2-x1.0/accuracy-check.yml
+++ b/models/public/shufflenet-v2-x1.0/accuracy-check.yml
@@ -5,6 +5,13 @@ models:
       - framework: openvino
         adapter: classification

+      - framework: opencv
+        inputs:
+          - name: input
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification
+
     datasets:
       - name: imagenet_1000_classes
         reader: pillow_imread
diff --git a/models/public/single-human-pose-estimation-0001/accuracy-check.yml b/models/public/single-human-pose-estimation-0001/accuracy-check.yml
index 075a3249b89..80f27de6dfb 100644
--- a/models/public/single-human-pose-estimation-0001/accuracy-check.yml
+++ b/models/public/single-human-pose-estimation-0001/accuracy-check.yml
@@ -6,6 +6,13 @@ models:
         adapter:
           type: single_human_pose_estimation

+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 384, 288
+        adapter: single_human_pose_estimation
+
     datasets:
       - name: ms_coco_single_keypoints
         preprocessing:
diff --git a/models/public/squeezenet1.0/accuracy-check.yml b/models/public/squeezenet1.0/accuracy-check.yml
index 25f3fba285b..2d2da6c812f 100644
--- a/models/public/squeezenet1.0/accuracy-check.yml
+++ b/models/public/squeezenet1.0/accuracy-check.yml
@@ -28,6 +28,12 @@ models:
     launchers:
       - framework: openvino
         adapter: classification
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 227, 227
+        adapter: classification

     datasets:
       - name: imagenet_1000_classes
diff --git a/models/public/squeezenet1.1/accuracy-check.yml b/models/public/squeezenet1.1/accuracy-check.yml
index ffcb52825a9..7e6ec4abf1a 100644
--- a/models/public/squeezenet1.1/accuracy-check.yml
+++ b/models/public/squeezenet1.1/accuracy-check.yml
@@ -28,6 +28,12 @@ models:
     launchers:
      - framework: openvino
        adapter: classification
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 227, 227
+        adapter: classification

     datasets:
       - name: imagenet_1000_classes
diff --git a/models/public/ssd-resnet34-1200-onnx/accuracy-check.yml b/models/public/ssd-resnet34-1200-onnx/accuracy-check.yml
index 700a5d0fece..d5acbefa579 100644
--- a/models/public/ssd-resnet34-1200-onnx/accuracy-check.yml
+++ b/models/public/ssd-resnet34-1200-onnx/accuracy-check.yml
@@ -1,5 +1,5 @@
 models:
-  - name: ssd-resnet34-1200-onnx
+  - name: ssd-resnet34-1200-onnx
     launchers:
       - framework: openvino
         adapter:
@@ -8,6 +8,16 @@ models:
           labels_out: '.*labels*'
           bboxes_out: '.*bboxes*'
+      - framework: opencv
+        inputs:
+          - name: image
+            type: INPUT
+            shape: 3, 1200, 1200
+        adapter:
+          type: ssd_onnx
+          scores_out: '.*scores*'
+          labels_out: '.*labels*'
+          bboxes_out: '.*bboxes*'

     datasets:
       - name: ms_coco_detection_80_class_with_background

diff --git a/models/public/ssd300/accuracy-check.yml b/models/public/ssd300/accuracy-check.yml
index 7c3acf3a1a3..070497b10d3 100644
--- a/models/public/ssd300/accuracy-check.yml
+++ b/models/public/ssd300/accuracy-check.yml
@@ -25,6 +25,12 @@ models:
     launchers:
       - framework: openvino
         adapter: ssd
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 300, 300
+        adapter: ssd

     datasets:
       - name: VOC2007_detection
diff --git a/models/public/ssd512/accuracy-check.yml b/models/public/ssd512/accuracy-check.yml
index e06c225d3b1..8fefb79ff0e 100644
--- a/models/public/ssd512/accuracy-check.yml
+++ b/models/public/ssd512/accuracy-check.yml
@@ -26,6 +26,12 @@ models:
     launchers:
       - framework: openvino
         adapter: ssd
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 512, 512
+        adapter: ssd

     datasets:
       - name: VOC2007_detection
diff --git a/models/public/ssd_mobilenet_v1_coco/accuracy-check.yml b/models/public/ssd_mobilenet_v1_coco/accuracy-check.yml
index f8de01ec684..fa511f2f740 100644
--- a/models/public/ssd_mobilenet_v1_coco/accuracy-check.yml
+++ b/models/public/ssd_mobilenet_v1_coco/accuracy-check.yml
@@ -1,9 +1,16 @@
 models:
-  - name: ssd_mobilenet_v1_coco
+  - name: ssd_mobilenet_v1_coco
     launchers:
       - framework: openvino
         adapter: ssd

+      - framework: opencv
+        inputs:
+          - name: image_tensor
+            type: INPUT
+            shape: 3, 300, 300
+        adapter: ssd
+
     datasets:
       - name: ms_coco_detection_91_classes
         preprocessing:
diff --git a/models/public/ssd_mobilenet_v1_fpn_coco/accuracy-check.yml b/models/public/ssd_mobilenet_v1_fpn_coco/accuracy-check.yml
index 372a810be47..04c24a4e7b4 100644
--- a/models/public/ssd_mobilenet_v1_fpn_coco/accuracy-check.yml
+++ b/models/public/ssd_mobilenet_v1_fpn_coco/accuracy-check.yml
@@ -1,9 +1,16 @@
 models:
-  - name: ssd_mobilenet_v1_fpn_coco
+  - name: ssd_mobilenet_v1_fpn_coco
     launchers:
       - framework: openvino
         adapter: ssd

+      - framework: opencv
+        inputs:
+          - name: image_tensor
+            type: INPUT
+            shape: 3, 640, 640
+        adapter: ssd
+
     datasets:
       - name: ms_coco_detection_91_classes
         preprocessing:
diff --git a/models/public/ssdlite_mobilenet_v2/accuracy-check.yml b/models/public/ssdlite_mobilenet_v2/accuracy-check.yml
index 2e7633e3150..a7aa1de899b 100644
--- a/models/public/ssdlite_mobilenet_v2/accuracy-check.yml
+++ b/models/public/ssdlite_mobilenet_v2/accuracy-check.yml
@@ -1,9 +1,16 @@
 models:
-  - name: ssdlite_mobilenet_v2
+  - name: ssdlite_mobilenet_v2
     launchers:
       - framework: openvino
         adapter: ssd

+      - framework: opencv
+        inputs:
+          - name: image_tensor
+            type: INPUT
+            shape: 3, 300, 300
+        adapter: ssd
+
     datasets:
       - name: ms_coco_detection_91_classes
         preprocessing:
diff --git a/models/public/swin-tiny-patch4-window7-224/accuracy-check.yml b/models/public/swin-tiny-patch4-window7-224/accuracy-check.yml
index 6f4094ceb8a..1b778d480fc 100644
--- a/models/public/swin-tiny-patch4-window7-224/accuracy-check.yml
+++ b/models/public/swin-tiny-patch4-window7-224/accuracy-check.yml
@@ -36,6 +36,12 @@ models:
     launchers:
       - framework: openvino
         adapter: classification
+      - framework: opencv
+        inputs:
+          - name: input
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification

     datasets:
       - name: imagenet_1000_classes
diff --git a/models/public/text-recognition-resnet-fc/accuracy-check.yml b/models/public/text-recognition-resnet-fc/accuracy-check.yml
index c0c76430bcc..08e8df22a40 100644
--- a/models/public/text-recognition-resnet-fc/accuracy-check.yml
+++ b/models/public/text-recognition-resnet-fc/accuracy-check.yml
@@ -45,6 +45,53 @@ models:
             35: "y"
             36: "z"

+      - framework: opencv
+        inputs:
+          - name: input
+            type: INPUT
+            shape: 1, 32, 100
+        adapter:
+          type: simple_decoder
+          eos_label: "[s]"
+          custom_label_map:
+            0: "[s]"
+            1: "0"
+            2: "1"
+            3: "2"
+            4: "3"
+            5: "4"
+            6: "5"
+            7: "6"
+            8: "7"
+            9: "8"
+            10: "9"
+            11: "a"
+            12: "b"
+            13: "c"
+            14: "d"
+            15: "e"
+            16: "f"
+            17: "g"
+            18: "h"
+            19: "i"
+            20: "j"
+            21: "k"
+            22: "l"
+            23: "m"
+            24: "n"
+            25: "o"
+            26: "p"
+            27: "q"
+            28: "r"
+            29: "s"
+            30: "t"
+            31: "u"
+            32: "v"
+            33: "w"
+            34: "x"
+            35: "y"
+            36: "z"
+
     datasets:
       - name: IIIT5K
         preprocessing:
diff --git a/models/public/ultra-lightweight-face-detection-rfb-320/accuracy-check.yml b/models/public/ultra-lightweight-face-detection-rfb-320/accuracy-check.yml
index 8a5aba85e13..54c0ae62728 100644
--- a/models/public/ultra-lightweight-face-detection-rfb-320/accuracy-check.yml
+++ b/models/public/ultra-lightweight-face-detection-rfb-320/accuracy-check.yml
@@ -45,6 +45,15 @@ models:
           type: ultra_lightweight_face_detection
           boxes_out: "boxes"
           scores_out: "scores"
+      - framework: opencv
+        inputs:
+          - name: input
+            type: INPUT
+            shape: 3, 240, 320
+        adapter:
+          type: ultra_lightweight_face_detection
+          boxes_out: "boxes"
+          scores_out: "scores"

     datasets:
       - name: wider
diff --git a/models/public/ultra-lightweight-face-detection-slim-320/accuracy-check.yml b/models/public/ultra-lightweight-face-detection-slim-320/accuracy-check.yml
index 6d91d4edd0e..dde6a180f65 100644
--- a/models/public/ultra-lightweight-face-detection-slim-320/accuracy-check.yml
+++ b/models/public/ultra-lightweight-face-detection-slim-320/accuracy-check.yml
@@ -45,6 +45,15 @@ models:
           type: ultra_lightweight_face_detection
           boxes_out: "boxes"
           scores_out: "scores"
+      - framework: opencv
+        inputs:
+          - name: input
+            type: INPUT
+            shape: 3, 240, 320
+        adapter:
+          type: ultra_lightweight_face_detection
+          boxes_out: "boxes"
+          scores_out: "scores"

     datasets:
       - name: wider
diff --git a/models/public/vehicle-license-plate-detection-barrier-0123/accuracy-check.yml b/models/public/vehicle-license-plate-detection-barrier-0123/accuracy-check.yml
index 8173b675a6b..e4de0f290cb 100644
--- a/models/public/vehicle-license-plate-detection-barrier-0123/accuracy-check.yml
+++ b/models/public/vehicle-license-plate-detection-barrier-0123/accuracy-check.yml
@@ -9,6 +9,13 @@ models:
       - framework: openvino
         adapter: ssd

+      - framework: opencv
+        inputs:
+          - name: Placeholder
+            type: INPUT
+            shape: 3, 256, 256
+        adapter: ssd
+
     datasets:
       - name: vehicle_license_plate_detection

diff --git a/models/public/vehicle-reid-0001/accuracy-check.yml b/models/public/vehicle-reid-0001/accuracy-check.yml
index c7814493e15..9f9d128f2f0 100644
--- a/models/public/vehicle-reid-0001/accuracy-check.yml
+++ b/models/public/vehicle-reid-0001/accuracy-check.yml
@@ -5,6 +5,13 @@ models:
       - framework: openvino
         adapter: reid

+      - framework: opencv
+        inputs:
+          - name: input
+            type: INPUT
+            shape: 3, 208, 208
+        adapter: reid
+
     datasets:
       - name: veri776

diff --git a/models/public/vgg16/accuracy-check.yml b/models/public/vgg16/accuracy-check.yml
index cab511f8059..67da7e00c09 100644
--- a/models/public/vgg16/accuracy-check.yml
+++ b/models/public/vgg16/accuracy-check.yml
@@ -29,6 +29,12 @@ models:
     launchers:
       - framework: openvino
         adapter: classification
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification

     datasets:
       - name: imagenet_1000_classes
diff --git a/models/public/vgg19/accuracy-check.yml b/models/public/vgg19/accuracy-check.yml
index 9ca539620bb..eda7ef75268 100644
--- a/models/public/vgg19/accuracy-check.yml
+++ b/models/public/vgg19/accuracy-check.yml
@@ -29,6 +29,12 @@ models:
     launchers:
       - framework: openvino
         adapter: classification
+      - framework: opencv
+        inputs:
+          - name: data
+            type: INPUT
+            shape: 3, 224, 224
+        adapter: classification

     datasets:
       - name: imagenet_1000_classes
diff --git a/models/public/vitstr-small-patch16-224/accuracy-check.yml b/models/public/vitstr-small-patch16-224/accuracy-check.yml
index 8baeab707c1..051c5e9cb67 100644
--- a/models/public/vitstr-small-patch16-224/accuracy-check.yml
+++ b/models/public/vitstr-small-patch16-224/accuracy-check.yml
@@ -79,6 +79,19 @@ models:
           do_lower: True
           # Use option "accuracy_check [...] --model_attributes " to provide path to vocab.txt
           vocabulary_file: vitstr-small-patch16-224/vocab.txt
+      - framework: opencv
+        inputs:
+          - name: image
+            type: INPUT
+            shape: 1, 224, 224
+        adapter:
+          type: simple_decoder
+          eos_label: '[s]'
+          start_label: '[GO]'
+          start_index: 1
+          do_lower: True
+          # Use option "accuracy_check [...] --model_attributes " to provide path to vocab.txt
+          vocabulary_file: vitstr-small-patch16-224/vocab.txt

     datasets:
       - name: ICDAR2013
diff --git a/models/public/wav2vec2-base/accuracy-check.yml b/models/public/wav2vec2-base/accuracy-check.yml
index 57e3b43cec6..be57971e570 100644
--- a/models/public/wav2vec2-base/accuracy-check.yml
+++ b/models/public/wav2vec2-base/accuracy-check.yml
@@ -5,7 +5,18 @@ models:
         allow_reshape_input: true
         adapter:
           type: wav2vec
-          alphabet: ['', '', '', '', '|', "E", "T", "A", "O", "N", "I", "H", "S", "R", "D", "L", "U", "M", "W", "C", "F", "G", "Y", "P", "B", "V", "K", "'", "X", "J", "Q", "Z"]
+          alphabet: [, , , , '|', E, T, A, O, N, I, H, S, R, D, L,
+            U, M, W, C, F, G, Y, P, B, V, K, "'", X, J, Q, Z]
+      - framework: opencv
+        inputs:
+          - name: inputs
+            type: INPUT
+            shape: 30480
+        allow_reshape_input: true
+        adapter:
+          type: wav2vec
+          alphabet: [, , , , '|', E, T, A, O, N, I, H, S, R, D, L,
+            U, M, W, C, F, G, Y, P, B, V, K, "'", X, J, Q, Z]

     datasets:
       - name: librispeech-test-clean
diff --git a/models/public/wavernn/wavernn-rnn/accuracy-check.yml b/models/public/wavernn/wavernn-rnn/accuracy-check.yml
index 5b0d704627e..3901d42c63d 100644
--- a/models/public/wavernn/wavernn-rnn/accuracy-check.yml
+++ b/models/public/wavernn/wavernn-rnn/accuracy-check.yml
@@ -34,6 +34,48 @@ models:
             type: INPUT
             value: .*x
         allow_reshape_input: True
+      - framework: opencv
+        adapter:
+          type: multi_output_regression
+          outputs:
+            - h1
+            - h2
+            - logits
+        inputs:
+          - name: a1_t
+            type: INPUT
+            value: .*a1_t
+            shape: 32
+          - name: a2_t
+            type: INPUT
+            value: .*a2_t
+            shape: 32
+          - name: a3_t
+            type: INPUT
+            value: .*a3_t
+            shape: 32
+          - name: a4_t
+            type: INPUT
+            value: .*a4_t
+            shape: 32
+          - name: h1.1
+            type: INPUT
+            value: .*h1*
+            shape: 512
+          - name: h2.1
+            type: INPUT
+            value: .*h2*
+            shape: 512
+          - name: m_t
+            type: INPUT
+            value: .*m_t
+            shape: 80
+          - name: x
+            type: INPUT
+            value: .*x
+            shape: 1
+        allow_reshape_input: True
+
     datasets:
       - name: wavernn-io
         metrics:
diff --git a/models/public/wavernn/wavernn-upsampler/accuracy-check.yml b/models/public/wavernn/wavernn-upsampler/accuracy-check.yml
index 86ee08d1505..346d6f70ef7 100644
--- a/models/public/wavernn/wavernn-upsampler/accuracy-check.yml
+++ b/models/public/wavernn/wavernn-upsampler/accuracy-check.yml
@@ -12,6 +12,20 @@ models:
             type: INPUT
             layout: NHWC
         allow_reshape_input: True
+
+      - framework: opencv
+        adapter:
+          type: multi_output_regression
+          outputs:
+            - aux
+            - upsample_mels
+        inputs:
+          - name: mels
+            type: INPUT
+            layout: NHWC
+            shape: 200, 80
+        allow_reshape_input: True
+
     datasets:
       - name: wavernn-io-upsampler
         data_source: wavernn_io/upsampler
diff --git a/models/public/yolact-resnet50-fpn-pytorch/accuracy-check.yml b/models/public/yolact-resnet50-fpn-pytorch/accuracy-check.yml
index 0e962a45d84..d4a8f197ecd 100644
--- a/models/public/yolact-resnet50-fpn-pytorch/accuracy-check.yml
+++ b/models/public/yolact-resnet50-fpn-pytorch/accuracy-check.yml
@@ -10,15 +10,15 @@ models:
           mask_out: "mask"
           proto_out: "proto"
     datasets:
-      - name: ms_coco_mask_rcnn_short_80_classes_without_background
+      - name: ms_coco_mask_rcnn_short_80_classes_without_background
         reader: pillow_imread
         preprocessing:
           - type: resize
             size: 550
             use_pillow: True
           - type: normalization
-            mean: 123.68, 116.78, 103.94
-            std: 58.40, 57.12, 57.38
+            mean: 123.68, 116.78, 103.94
+            std: 58.40, 57.12, 57.38
         metrics:
           - name: AP@masks
             type: coco_orig_segm_precision
@@ -37,8 +37,19 @@ models:
           conf_out: "conf"
           mask_out: "mask"
          proto_out: "proto"
+      - framework: opencv
+        inputs:
+          - name: input.1
+            type: INPUT
+            shape: 3, 550, 550
+        adapter:
+          type: yolact
+          boxes_out: "boxes"
+          conf_out: "conf"
+          mask_out: "mask"
+          proto_out: "proto"
     datasets:
-      - name: ms_coco_mask_rcnn_short_80_classes_without_background
+      - name: ms_coco_mask_rcnn_short_80_classes_without_background
         preprocessing:
           - type: resize
             size: 550
diff --git a/models/public/yolo-v1-tiny-tf/accuracy-check.yml b/models/public/yolo-v1-tiny-tf/accuracy-check.yml
index dea481c7e16..242ba50ce6d 100644
--- a/models/public/yolo-v1-tiny-tf/accuracy-check.yml
+++ b/models/public/yolo-v1-tiny-tf/accuracy-check.yml
@@ -56,6 +56,17 @@ models:
           num: 5
           coords: 4
           classes: 20
+      - framework: opencv
+        inputs:
+          - name: input_1
+            type: INPUT
+            shape: 3, 416, 416
+        adapter:
+          type: yolo_v2
+          anchors: 1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52
+          num: 5
+          coords: 4
+          classes: 20

     datasets:
       - name: VOC2007_detection_no_bkgr
diff --git a/models/public/yolo-v2-tf/accuracy-check.yml b/models/public/yolo-v2-tf/accuracy-check.yml
index b3ffa8aaa3a..aa8c44b553b 100644
--- a/models/public/yolo-v2-tf/accuracy-check.yml
+++ b/models/public/yolo-v2-tf/accuracy-check.yml
@@ -3,11 +3,12 @@ models:
     launchers:
       - framework: tf
-        model: yolo-v2.pb
+        model: yolo-v2.pb
         output_names: [conv2d_22/BiasAdd]
         adapter:
           type: yolo_v2
-          anchors: "0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828"
+          anchors: "0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282,
+            3.52778, 9.77052, 9.16828"
           num: 5
           coords: 4
           classes: 80
@@ -52,7 +53,21 @@
       - framework: openvino
         adapter:
           type: yolo_v2
-          anchors: "0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828"
+          anchors: "0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282,
+            3.52778, 9.77052, 9.16828"
+          num: 5
+          coords: 4
+          classes: 80
+          cells: 19
+      - framework: opencv
+        inputs:
+          - name: image_input
+            type: INPUT
+            shape: 3, 608, 608
+        adapter:
+          type: yolo_v2
+          anchors: "0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282,
+            3.52778, 9.77052, 9.16828"
           num: 5
           coords: 4
           classes: 80
diff --git a/models/public/yolo-v2-tiny-tf/accuracy-check.yml b/models/public/yolo-v2-tiny-tf/accuracy-check.yml
index 2e5a40c70db..d3f511f0177 100644
--- a/models/public/yolo-v2-tiny-tf/accuracy-check.yml
+++ b/models/public/yolo-v2-tiny-tf/accuracy-check.yml
@@ -3,10 +3,11 @@ models:
     launchers:
       - framework: tf
-        model: yolo-v2-tiny.pb
+        model: yolo-v2-tiny.pb
         adapter:
           type: yolo_v2
-          anchors: "0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828"
+          anchors: "0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282,
+            3.52778, 9.77052, 9.16828"
           num: 5
           coords: 4
           classes: 80
@@ -50,7 +51,20 @@
       - framework: openvino
         adapter:
           type: yolo_v2
-          anchors: "0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828"
+          anchors: "0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282,
+            3.52778, 9.77052, 9.16828"
+          num: 5
+          coords: 4
+          classes: 80
+      - framework: opencv
+        inputs:
+          - name: image_input
+            type: INPUT
+            shape: 3, 416, 416
+        adapter:
+          type: yolo_v2
+          anchors: "0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282,
+            3.52778, 9.77052, 9.16828"
           num: 5
           coords: 4
           classes: 80
diff --git a/models/public/yolo-v3-onnx/accuracy-check.yml b/models/public/yolo-v3-onnx/accuracy-check.yml
index 90f41ac1f61..a3af475bbe4 100644
--- a/models/public/yolo-v3-onnx/accuracy-check.yml
+++ b/models/public/yolo-v3-onnx/accuracy-check.yml
@@ -47,6 +47,20 @@ models:
         inputs:
           - name: image_shape
             type: ORIG_IMAGE_INFO
+
+      - framework: opencv
+        adapter:
+          type: yolo_v3_onnx
+          boxes_out: yolonms_layer_1/ExpandDims_1:0
+          scores_out: yolonms_layer_1/ExpandDims_3:0
+          indices_out: yolonms_layer_1/concat_2:0
+        inputs:
+          - name: image_shape
+            type: ORIG_IMAGE_INFO
+            shape: 2
+          - name: input_1
+            type: INPUT
+            shape: 3, 416, 416
     datasets:
       - name: ms_coco_detection_80_class_without_background
         reader: pillow_imread
diff --git a/models/public/yolo-v3-tf/accuracy-check.yml b/models/public/yolo-v3-tf/accuracy-check.yml
index c2a2dffc968..5a130d1b711 100644
--- a/models/public/yolo-v3-tf/accuracy-check.yml
+++ b/models/public/yolo-v3-tf/accuracy-check.yml
@@ -3,7 +3,7 @@ models:
     launchers:
       - framework: tf
-        model: yolo-v3.pb
+        model: yolo-v3.pb
         inputs:
           - name: 'input_1'
             type: INPUT
@@ -72,6 +72,22 @@
           - conv2d_58/Conv2D/YoloRegion
           - conv2d_66/Conv2D/YoloRegion
           - conv2d_74/Conv2D/YoloRegion
+      - framework: opencv
+        inputs:
+          - name: input_1
+            type: INPUT
+            shape: 3, 416, 416
+        adapter:
+          type: yolo_v3
+          anchors: "10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326"
+          num: 9
+          coords: 4
+          classes: 80
+          anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
+          outputs:
+            - conv2d_58/Conv2D/YoloRegion
+            - conv2d_66/Conv2D/YoloRegion
+            - conv2d_74/Conv2D/YoloRegion

     datasets:
       - name: ms_coco_detection_80_class_without_background
diff --git a/models/public/yolo-v3-tiny-onnx/accuracy-check.yml b/models/public/yolo-v3-tiny-onnx/accuracy-check.yml
index ee61784370a..89e2f342ccb 100644
--- a/models/public/yolo-v3-tiny-onnx/accuracy-check.yml
+++ b/models/public/yolo-v3-tiny-onnx/accuracy-check.yml
@@ -47,6 +47,20 @@ models:
         inputs:
           - name: image_shape
             type: ORIG_IMAGE_INFO
+
+      - framework: opencv
+        adapter:
+          type: yolo_v3_onnx
+          boxes_out: yolonms_layer_1
+          scores_out: yolonms_layer_1:1
+          indices_out: yolonms_layer_1:2
+        inputs:
+          - name: image_shape
+            type: ORIG_IMAGE_INFO
+            shape: 2
+          - name: input_1
+            type: INPUT
+            shape: 3, 416, 416
     datasets:
       - name: ms_coco_detection_80_class_without_background
         reader: pillow_imread
diff --git a/models/public/yolo-v3-tiny-tf/accuracy-check.yml b/models/public/yolo-v3-tiny-tf/accuracy-check.yml
index 813afade730..87588e6553c 100644
--- a/models/public/yolo-v3-tiny-tf/accuracy-check.yml
+++ b/models/public/yolo-v3-tiny-tf/accuracy-check.yml
@@ -2,7 +2,7 @@ models:
   - name: yolo-v3-tiny-tf
     launchers:
       - framework: tf
-        model: yolo-v3-tiny.pb
+        model: yolo-v3-tiny.pb
         adapter:
           type: yolo_v3
           anchors: tiny_yolo_v3
@@ -71,6 +71,22 @@
           outputs:
             - conv2d_9/Conv2D/YoloRegion
             - conv2d_12/Conv2D/YoloRegion
+      - framework: opencv
+        inputs:
+          - name: image_input
+            type: INPUT
+            shape: 3, 416, 416
+        adapter:
+          type: yolo_v3
+          anchors: tiny_yolo_v3
+          num: 3
+          coords: 4
+          classes: 80
+          threshold: 0.001
+          anchor_masks: [[3, 4, 5], [1, 2, 3]]
+          outputs:
+            - conv2d_9/Conv2D/YoloRegion
+            - conv2d_12/Conv2D/YoloRegion

     datasets:
       - name: ms_coco_detection_80_class_without_background
diff --git a/models/public/yolo-v4-tf/accuracy-check.yml b/models/public/yolo-v4-tf/accuracy-check.yml
index 6560ffd898b..8b88996dfb8 100644
--- a/models/public/yolo-v4-tf/accuracy-check.yml
+++ b/models/public/yolo-v4-tf/accuracy-check.yml
@@ -79,6 +79,25 @@
             - Func/StatefulPartitionedCall/output/_544
             - Func/StatefulPartitionedCall/output/_542
             - Func/StatefulPartitionedCall/output/_543
+      - framework: opencv
+        inputs:
+          - name: image_input
+            type: INPUT
+            shape: 3, 608, 608
+        adapter:
+          type: yolo_v3
+          anchors: 12,16,19,36,40,28,36,75,76,55,72,146,142,110,192,243,459,401
+          num: 3
+          coords: 4
+          classes: 80
+          threshold: 0.001
+          anchor_masks: [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
+          raw_output: True
+          outputs:
+            - StatefulPartitionedCall/model/conv2d_93/BiasAdd/Add
+            - StatefulPartitionedCall/model/conv2d_101/BiasAdd/Add
+            - StatefulPartitionedCall/model/conv2d_109/BiasAdd/Add
+
     datasets:
       - name: ms_coco_detection_80_class_without_background
         preprocessing:
diff --git a/models/public/yolo-v4-tiny-tf/accuracy-check.yml b/models/public/yolo-v4-tiny-tf/accuracy-check.yml
index 21e64fd329a..185045d298e 100644
--- a/models/public/yolo-v4-tiny-tf/accuracy-check.yml
+++ b/models/public/yolo-v4-tiny-tf/accuracy-check.yml
@@ -2,7 +2,7 @@ models:
   - name: yolo-v4-tiny-tf
     launchers:
       - framework: tf
-        model: yolo-v4-tiny.pb
+        model: yolo-v4-tiny.pb
         adapter:
           type: yolo_v3
           anchors: 10,14,23,27,37,58,81,82,135,169,344,319
@@ -76,6 +76,24 @@
           outputs:
             - conv2d_20/BiasAdd
             - conv2d_17/BiasAdd
+      - framework: opencv
+        inputs:
+          - name: image_input
+            type: INPUT
+            shape: 3, 416, 416
+        adapter:
+          type: yolo_v3
+          anchors: 10,14,23,27,37,58,81,82,135,169,344,319
+          num: 2
+          coords: 4
+          classes: 80
+          threshold: 0.001
+          anchor_masks: [[1, 2, 3], [3, 4, 5]]
+          raw_output: True
+          outputs:
+            - conv2d_20/BiasAdd/Add
+            - conv2d_17/BiasAdd/Add
+
     datasets:
       - name: ms_coco_detection_80_class_without_background
         preprocessing:
diff --git a/models/public/yolof/accuracy-check.yml b/models/public/yolof/accuracy-check.yml
index 4d08ac8c177..0b13efb0948 100644
--- a/models/public/yolof/accuracy-check.yml
+++ b/models/public/yolof/accuracy-check.yml
@@ -15,6 +15,25 @@ models:
           multiple_labels: True
           outputs:
             - boxes
+      - framework: opencv
+        inputs:
+          - name: image
+            type: INPUT
+            shape: 3, 608, 608
+        adapter:
+          type: yolof
+          anchors: 16,16,32,32,64,64,128,128,256,256,512,512
+          num: 6
+          cells: [38]
+          coords: 4
+          classes: 80
+          threshold: 0.05
+          raw_output: true
+          output_format: BHW
+          multiple_labels: true
+          outputs:
+            - boxes
+
     datasets:
       - name: ms_coco_detection_80_class_without_background

diff --git a/models/public/yolox-tiny/accuracy-check.yml b/models/public/yolox-tiny/accuracy-check.yml
index c66a55a264d..ddf7f26f496 100644
--- a/models/public/yolox-tiny/accuracy-check.yml
+++ b/models/public/yolox-tiny/accuracy-check.yml
@@ -46,6 +46,12 @@ models:
     launchers:
       - framework: openvino
         adapter: yolox
+      - framework: opencv
+        inputs:
+          - name: images
+            type: INPUT
+            shape: 3, 416, 416
+        adapter: yolox

     datasets:
       - name: ms_coco_detection_80_class_without_background