5 files changed: +175 −0 lines changed
New file (35 lines added): QAT config for efficientnet-lite0

arch: efficientnet-lite0   # model architecture
model_path:                # pretrained full-precision ckpt
deploy: False
evaluate: False
pretrained: True
resume:                    # resume QAT ckpt

epochs: 120
start_epoch: 0
batch_size: 512            # 64 * 8 GPUs
optim: adam
lr: 0.0001
lr_scheduler: Cosine       # T = 120 epochs
weight_decay: 0.000

# The following configs in 'qparams' can be loaded as
# extra_qparams["extra_qconfig_dict"] for preparation in the academic setting
quantization:
  enabled: True
  type: Academic
  qparams:
    w_observer: LSQObserver
    w_fakequantize: LearnableFakeQuantize
    w_qscheme:
      bit: 4
      symmetry: True
      per_channel: False
      pot_scale: False
    a_observer: LSQObserver
    a_fakequantize: LearnableFakeQuantize
    a_qscheme:
      bit: 4
      symmetry: False
      per_channel: False
      pot_scale: False
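The 'qparams' comment above refers to MQBench-style academic-mode preparation. A minimal sketch of how such a config could be consumed, assuming MQBench's prepare_by_platform API; the YAML path and the torchvision stand-in model are hypothetical, since the actual file names and training script are not part of this diff:

    import yaml
    import torchvision.models as models
    from mqbench.prepare_by_platform import prepare_by_platform, BackendType

    with open("efficientnet_lite0_4bit.yaml") as f:   # hypothetical path
        cfg = yaml.safe_load(f)

    # The w_/a_ observer, fakequantize and qscheme entries map directly
    # onto the dict passed as 'extra_qconfig_dict'.
    prepare_custom_config_dict = {
        "extra_qconfig_dict": cfg["quantization"]["qparams"],
    }

    model = models.resnet18(pretrained=True)   # stand-in for the configured arch
    model = prepare_by_platform(model, BackendType.Academic,
                                prepare_custom_config_dict)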
New file (35 lines added): QAT config for mobilenetv2

arch: mobilenetv2          # model architecture
model_path:                # pretrained full-precision ckpt
deploy: False
evaluate: False
pretrained: True
resume:                    # resume QAT ckpt

epochs: 72
start_epoch: 0
batch_size: 512            # 64 * 8 GPUs
optim: adam
lr: 0.0004
lr_scheduler: Cosine       # T = 72 epochs
weight_decay: 0.000

# The following configs in 'qparams' can be loaded as
# extra_qparams["extra_qconfig_dict"] for preparation in the academic setting
quantization:
  enabled: True
  type: Academic
  qparams:
    w_observer: LSQObserver
    w_fakequantize: LearnableFakeQuantize
    w_qscheme:
      bit: 4
      symmetry: True
      per_channel: False
      pot_scale: False
    a_observer: LSQObserver
    a_fakequantize: LearnableFakeQuantize
    a_qscheme:
      bit: 4
      symmetry: False
      per_channel: False
      pot_scale: False
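The w_fakequantize / a_fakequantize entries name LSQ-style learnable fake quantization (Learned Step Size Quantization, Esser et al.), paired with LSQObserver for step-size initialization. A minimal PyTorch sketch of the core LSQ operation only, not MQBench's LearnableFakeQuantize module:

    import torch

    def lsq_fake_quantize(x, scale, qmin, qmax, grad_scale):
        # Damp the gradient reaching 'scale' (LSQ uses g = 1/sqrt(N * qmax));
        # the forward value of the step size is unchanged.
        s = scale * grad_scale + (scale - scale * grad_scale).detach()
        q = torch.clamp(x / s, qmin, qmax)
        q = (q.round() - q).detach() + q   # straight-through estimator for round()
        return q * s

    x = torch.randn(16)
    s = torch.tensor(0.1, requires_grad=True)
    # 4-bit symmetric weights per the w_qscheme above: qmin, qmax = -8, 7
    y = lsq_fake_quantize(x, s, qmin=-8, qmax=7,
                          grad_scale=(x.numel() * 7) ** -0.5)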
New file (35 lines added): QAT config for regnetx600m

arch: regnetx600m          # model architecture
model_path:                # pretrained full-precision ckpt
deploy: False
evaluate: False
pretrained: True
resume:                    # resume QAT ckpt

epochs: 60
start_epoch: 0
batch_size: 256            # 32 * 8 GPUs
optim: adam
lr: 0.0002
lr_scheduler: Cosine       # T = 60 epochs
weight_decay: 0.000

# The following configs in 'qparams' can be loaded as
# extra_qparams["extra_qconfig_dict"] for preparation in the academic setting
quantization:
  enabled: True
  type: Academic
  qparams:
    w_observer: LSQObserver
    w_fakequantize: LearnableFakeQuantize
    w_qscheme:
      bit: 4
      symmetry: True
      per_channel: False
      pot_scale: False
    a_observer: LSQObserver
    a_fakequantize: LearnableFakeQuantize
    a_qscheme:
      bit: 4
      symmetry: False
      per_channel: False
      pot_scale: False
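For reference, the bit/symmetry fields of each qscheme determine the integer range the fake quantizer clamps to. A small helper illustrating the usual convention (an assumption about the defaults; MQBench derives these ranges internally):

    def quant_range(bit: int, symmetric: bool) -> tuple:
        """(qmin, qmax): signed range if symmetric, unsigned otherwise."""
        if symmetric:
            return -(1 << (bit - 1)), (1 << (bit - 1)) - 1
        return 0, (1 << bit) - 1

    print(quant_range(4, True))    # weights,     symmetry: True  -> (-8, 7)
    print(quant_range(4, False))   # activations, symmetry: False -> (0, 15)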
New file (35 lines added): QAT config for resnet18

arch: resnet18             # model architecture
model_path:                # pretrained full-precision ckpt
deploy: False
evaluate: False
pretrained: True
resume:                    # resume QAT ckpt

epochs: 100
start_epoch: 0
batch_size: 512            # 128 * 4 GPUs
optim: adam
lr: 0.00005
lr_scheduler: Cosine       # T = 100 epochs
weight_decay: 0.000

# The following configs in 'qparams' can be loaded as
# extra_qparams["extra_qconfig_dict"] for preparation in the academic setting
quantization:
  enabled: True
  type: Academic
  qparams:
    w_observer: LSQObserver
    w_fakequantize: LearnableFakeQuantize
    w_qscheme:
      bit: 4
      symmetry: True
      per_channel: False
      pot_scale: False
    a_observer: LSQObserver
    a_fakequantize: LearnableFakeQuantize
    a_qscheme:
      bit: 4
      symmetry: False
      per_channel: False
      pot_scale: False
New file (35 lines added): QAT config for resnet50

arch: resnet50             # model architecture
model_path:                # pretrained full-precision ckpt
deploy: False
evaluate: False
pretrained: True
resume:                    # resume QAT ckpt

epochs: 150
start_epoch: 0
batch_size: 384            # 48 * 8 GPUs
optim: adam
lr: 0.0001
lr_scheduler: Cosine       # T = 150 epochs
weight_decay: 0.000

# The following configs in 'qparams' can be loaded as
# extra_qparams["extra_qconfig_dict"] for preparation in the academic setting
quantization:
  enabled: True
  type: Academic
  qparams:
    w_observer: LSQObserver
    w_fakequantize: LearnableFakeQuantize
    w_qscheme:
      bit: 4
      symmetry: True
      per_channel: False
      pot_scale: False
    a_observer: LSQObserver
    a_fakequantize: LearnableFakeQuantize
    a_qscheme:
      bit: 4
      symmetry: False
      per_channel: False
      pot_scale: False
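The training fields are identical across all five files apart from epochs, batch_size and lr, and they map directly onto standard PyTorch components; note the cosine period T always equals the epoch count, i.e. a single annealing cycle over the whole run. A minimal sketch using the resnet50 values and a stand-in model:

    import torch

    model = torch.nn.Linear(8, 2)   # stand-in for the prepared QAT model
    epochs, lr, weight_decay = 150, 0.0001, 0.000

    optimizer = torch.optim.Adam(model.parameters(), lr=lr,
                                 weight_decay=weight_decay)
    # 'lr_scheduler: Cosine  # T = 150 epochs' -> one cosine cycle
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                           T_max=epochs)

    for epoch in range(epochs):
        ...  # train one epoch
        scheduler.step()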