@@ -14,8 +14,7 @@
 
 import os
 import re
-from args import DataPatternArg
-from copy import deepcopy
+#from args import DataPatternArg
 
 from cases import BASE, single_option_cases
 
@@ -49,25 +48,46 @@ def write_to_csv(self, result, test_case, device):
 
         if not os.path.isfile(self.filename):
             models_col = [f"Model {key}" for key in self.model_keys]
-            columns = ','.join(['Device', *models_col, *result.keys()])
+            precisions_col = [f"Precision {key}" for key in self.model_keys]
+            columns = ','.join(['Device', *precisions_col, *models_col, *result.keys()])
             with open(self.filename, 'w') as f:
                 print(columns, file=f)
 
+        precisions = [test_case.options[key].precision if key in test_case.options else '-'
+                      for key in self.model_keys]
         models_names = [test_case.options[key].name if key in test_case.options else '-'
                         for key in self.model_keys]
-        data = ','.join([device, *models_names, *result.values()])
+        data = ','.join([device, *precisions, *models_names, *result.values()])
         with open(self.filename, 'a') as f:
             print(data, file=f)
 
 
 DEMOS = [
-    deepcopy(BASE['interactive_face_detection_demo/cpp']).add_parser(PerformanceParser),
+    BASE['interactive_face_detection_demo/cpp_gapi'].add_parser(PerformanceParser),
 
-    deepcopy(BASE['object_detection_demo/python'])
+    BASE['interactive_face_detection_demo/cpp'].add_parser(PerformanceParser),
+
+    BASE['object_detection_demo/python']
     .only_models(['person-detection-0200', 'yolo-v2-tf'])
-    .update_option({'-i': DataPatternArg('action-recognition')})
+    # TODO: create large -i for performance scenario
+    # .update_option({'-i': DataPatternArg('action-recognition')})
     .add_test_cases(single_option_cases('-nireq', '3', '5'),
                     single_option_cases('-nstreams', '3', '4'),
                     single_option_cases('-nthreads', str(THREADS_NUM), str(THREADS_NUM - 2)))
-    .add_parser(PerformanceParser)
+    .add_parser(PerformanceParser),
+
+    BASE['bert_named_entity_recognition_demo/python']
+    .update_option({'--dynamic_shape': None})
+    .only_devices(['CPU'])
+    .add_parser(PerformanceParser),
+
+    BASE['gpt2_text_prediction_demo/python']
+    .update_option({'--dynamic_shape': None})
+    .only_devices(['CPU'])
+    .add_parser(PerformanceParser),
+
+    BASE['speech_recognition_wav2vec_demo/python']
+    .update_option({'--dynamic_shape': None})
+    .only_devices(['CPU'])
+    .add_parser(PerformanceParser),
 ]
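For reference, a minimal standalone sketch of the CSV layout the updated write_to_csv produces. The model key '-m', the Option stand-in, the device, and the metric values below are hypothetical placeholders chosen for illustration; only the column/row construction mirrors the patched code.

# Sketch of the header and data row built by the patched write_to_csv.
# All concrete values here are illustrative assumptions, not from the patch.
from collections import namedtuple

Option = namedtuple('Option', ['name', 'precision'])

model_keys = ['-m']                                    # keys looked up in test_case.options
options = {'-m': Option('person-detection-0200', 'FP16')}
result = {'FPS': '42.0', 'Latency': '23.8 ms'}         # parsed metrics, already strings
device = 'CPU'

models_col = [f"Model {key}" for key in model_keys]
precisions_col = [f"Precision {key}" for key in model_keys]
columns = ','.join(['Device', *precisions_col, *models_col, *result.keys()])

precisions = [options[key].precision if key in options else '-' for key in model_keys]
models_names = [options[key].name if key in options else '-' for key in model_keys]
data = ','.join([device, *precisions, *models_names, *result.values()])

print(columns)  # Device,Precision -m,Model -m,FPS,Latency
print(data)     # CPU,FP16,person-detection-0200,42.0,23.8 ms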