|
from rknn.api import RKNN
import os

# Batch-convert YOLOv8 ONNX models to RKNN format for Rockchip NPUs.
# One .rknn file is produced per (model size, SoC, quantization) combination,
# written to ./output/<soc>/.

# Full sweep of model sizes; narrow the lists below to restrict the batch.
for suffix in ["n", "s", "m", "l", "x"]:
    # Full sweep: ["rk3562", "rk3566", "rk3568", "rk3588"]
    for soc in ["rk3588"]:
        # Full sweep: [True, False] (int8-quantized and fp16 builds)
        for QUANTIZATION in [True]:
            INPUT_MODEL = 'yolov8{}.onnx'.format(suffix)
            WIDTH = 320
            HEIGHT = 320
            OUTPUT_MODEL_BASENAME = 'yolov8{}'.format(suffix)
            # Calibration image list used only when QUANTIZATION is True.
            DATASET = './datasets/coco20/dataset_coco20.txt'

            # Preprocessing / conversion configuration.
            MEAN_VALUES = [[0, 0, 0]]
            STD_VALUES = [[255, 255, 255]]  # scales 0-255 pixel input to 0-1
            QUANT_IMG_RGB2BGR = True
            QUANTIZED_DTYPE = "asymmetric_quantized-8"
            QUANTIZED_ALGORITHM = "normal"
            QUANTIZED_METHOD = "channel"
            FLOAT_DTYPE = "float16"
            OPTIMIZATION_LEVEL = 2
            TARGET_PLATFORM = soc
            CUSTOM_STRING = None
            REMOVE_WEIGHT = None
            COMPRESS_WEIGHT = False
            SINGLE_CORE_MODE = False
            MODEL_PRUNING = False  # fixed typo: was MODEL_PRUNNING
            OP_TARGET = None
            DYNAMIC_INPUT = None

            # "-i8" marks int8-quantized outputs so they do not collide with
            # fp16 builds of the same model/resolution.
            quant_suff = "-i8" if QUANTIZATION else ""

            OUTPUT_MODEL_FILE = "./output/{}/{}-{}x{}{}-{}.rknn".format(
                soc, OUTPUT_MODEL_BASENAME, WIDTH, HEIGHT, quant_suff, soc)
            os.makedirs("./output/{}".format(soc), exist_ok=True)

            rknn = RKNN()
            rknn.config(mean_values=MEAN_VALUES,
                        std_values=STD_VALUES,
                        quant_img_RGB2BGR=QUANT_IMG_RGB2BGR,
                        quantized_dtype=QUANTIZED_DTYPE,
                        quantized_algorithm=QUANTIZED_ALGORITHM,
                        quantized_method=QUANTIZED_METHOD,
                        float_dtype=FLOAT_DTYPE,
                        optimization_level=OPTIMIZATION_LEVEL,
                        target_platform=TARGET_PLATFORM,
                        custom_string=CUSTOM_STRING,
                        remove_weight=REMOVE_WEIGHT,
                        compress_weight=COMPRESS_WEIGHT,
                        single_core_mode=SINGLE_CORE_MODE,
                        model_pruning=MODEL_PRUNING,
                        op_target=OP_TARGET,
                        dynamic_input=DYNAMIC_INPUT)

            # NOTE: any failure aborts the whole batch (exit()), matching the
            # original behavior.
            # if rknn.load_pytorch("./input/" + INPUT_MODEL, [[HEIGHT, WIDTH, 3]]) != 0:
            if rknn.load_onnx("./input/" + INPUT_MODEL) != 0:
                print('Error loading model.')
                exit()

            if rknn.build(do_quantization=QUANTIZATION, dataset=DATASET) != 0:
                print('Error building model.')
                exit()

            if rknn.export_rknn(OUTPUT_MODEL_FILE) != 0:
                print('Error exporting rknn model.')
                exit()

            # FIX: release the RKNN context after each conversion. The
            # original never called release(), leaking one native context per
            # loop iteration across the whole batch.
            rknn.release()
0 commit comments