Skip to content

Commit bfdb762

Browse files
Tracin and zhangqi3 authored
[CI] Fix lint test python version. (#225)
* [CI] Fix lint test python version. Co-authored-by: zhangqi3 <[email protected]>
1 parent b8e3015 commit bfdb762

File tree

7 files changed

+22
-34
lines changed

7 files changed

+22
-34
lines changed

.github/workflows/lint-and-test.yml

Lines changed: 8 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ on: [push]
44

55
jobs:
66
Lint-and-test:
7-
runs-on: ubuntu-latest
7+
runs-on: ubuntu-18.04
88
strategy:
99
max-parallel: 5
1010

@@ -13,28 +13,21 @@ jobs:
1313
- name: Set up Python 3.6
1414
uses: actions/setup-python@v2
1515
with:
16-
python-version: 3.6
17-
- name: Add conda to system path
18-
run: |
19-
# $CONDA is an environment variable pointing to the root of the miniconda directory
20-
echo $CONDA/bin >> $GITHUB_PATH
16+
python-version: 3.7
2117
- name: Lint with flake8
2218
run: |
23-
conda install flake8
19+
pip install flake8
2420
flake8 .
25-
- name: Install onnxruntime and onnxsim
26-
run:
27-
pip install onnxruntime onnx-simplifier
28-
- name: Install prettytable
29-
run:
30-
pip install prettytable
3121
- name: Install Protobuf
3222
run:
33-
conda install protobuf=3.20.1
23+
pip install protobuf==3.19.0
24+
- name: Install onnx onnxruntime and onnxsim
25+
run:
26+
pip install onnx==1.7.0 onnxruntime onnx-simplifier
3427
- name: Install MQBench
3528
run: |
3629
python setup.py develop
3730
- name: Test with pytest
3831
run: |
39-
conda install pytest
32+
pip install pytest
4033
pytest test --junitxml=report.xml

mqbench/custom_quantizer/onnx_qnn_quantizer.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -56,9 +56,9 @@ def _qat_swap_modules(self, root: GraphModule, additional_qat_module_mapping: Di
5656
all_mappings = get_combined_dict(
5757
get_default_qat_module_mappings(), additional_qat_module_mapping)
5858
# There is no QLinearFC in ONNX for now.
59-
del(all_mappings[torch.nn.modules.linear.Linear])
60-
del(all_mappings[torch.nn.intrinsic.modules.fused.LinearReLU])
61-
del(all_mappings[qnni.modules.fused.LinearBn1d])
59+
del all_mappings[torch.nn.modules.linear.Linear]
60+
del all_mappings[torch.nn.intrinsic.modules.fused.LinearReLU]
61+
del all_mappings[qnni.modules.fused.LinearBn1d]
6262
root = self._convert(root, all_mappings, inplace=True)
6363
return root
6464

mqbench/deploy/common.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ def insert_node_purely(self, node, idx=0):
9393

9494
def del_initializer(self, initializer_name):
9595
if initializer_name in self.initializer:
96-
del(self.initializer[initializer_name])
96+
del self.initializer[initializer_name]
9797

9898
def optimize_model(self):
9999
# Delete redundant nodes.

mqbench/deploy/deploy_stpu.py

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
from collections import OrderedDict
44

55
import onnx
6-
from onnx import numpy_helper
76

87
from mqbench.deploy.common import (get_constant_inputs, prepare_data,
98
prepare_initializer,
@@ -15,10 +14,6 @@
1514

1615
class STPU_process(LinearQuantizer_process):
1716

18-
@staticmethod
19-
def get_constant(node: onnx.NodeProto):
20-
return numpy_helper.to_array(node.attribute[0].t).tolist()
21-
2217
def remove_fakequantize_and_collect_params(self, onnx_path, model_name):
2318
model = onnx.load(onnx_path)
2419
graph = model.graph

mqbench/fuser_method_mappings.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ def fuse_linear_bn(linear, bn):
4747
>>> b1 = nn.BatchNorm1d(20)
4848
>>> m2 = fuse_linear_bn(m1, b1)
4949
"""
50-
assert(linear.training == bn.training),\
50+
assert linear.training == bn.training, \
5151
"Linear and BN both must be in the same mode (train or eval)."
5252

5353
if linear.training:
@@ -59,7 +59,7 @@ def fuse_linear_bn(linear, bn):
5959

6060

6161
def fuse_deconv_bn(deconv, bn):
62-
assert(deconv.training == bn.training),\
62+
assert deconv.training == bn.training, \
6363
'DeConv and BN must be in the same mode (train or eval)'
6464

6565
if deconv.training:
@@ -72,7 +72,7 @@ def fuse_deconv_bn(deconv, bn):
7272

7373

7474
def fuse_deconv_bn_relu(deconv, bn, relu):
75-
assert(deconv.training == bn.training == relu.training),\
75+
assert deconv.training == bn.training == relu.training, \
7676
"DeConv and BN both must be in the same mode (train or eval)."
7777

7878
if deconv.training:
@@ -85,7 +85,7 @@ def fuse_deconv_bn_relu(deconv, bn, relu):
8585

8686

8787
def fuse_conv_freezebn(conv, bn):
88-
assert(bn.training is False), "Freezebn must be eval."
88+
assert bn.training is False, "Freezebn must be eval."
8989

9090
fused_module_class_map = {
9191
nn.Conv2d: qnni.ConvFreezebn2d,
@@ -102,7 +102,7 @@ def fuse_conv_freezebn(conv, bn):
102102

103103

104104
def fuse_conv_freezebn_relu(conv, bn, relu):
105-
assert(conv.training == relu.training and bn.training is False), "Conv and relu both must be in the same mode (train or eval) and bn must be eval."
105+
assert conv.training == relu.training and bn.training is False, "Conv and relu both must be in the same mode (train or eval) and bn must be eval."
106106
fused_module : Optional[Type[nn.Sequential]] = None
107107
if conv.training:
108108
map_to_fused_module_train = {
@@ -123,7 +123,7 @@ def fuse_conv_freezebn_relu(conv, bn, relu):
123123

124124

125125
def fuse_deconv_freezebn(deconv, bn):
126-
assert(bn.training is False), "Freezebn must be eval."
126+
assert bn.training is False, "Freezebn must be eval."
127127

128128
if deconv.training:
129129
assert bn.num_features == deconv.out_channels, 'Output channel of ConvTranspose2d must match num_features of BatchNorm2d'
@@ -135,7 +135,7 @@ def fuse_deconv_freezebn(deconv, bn):
135135

136136

137137
def fuse_deconv_freezebn_relu(deconv, bn, relu):
138-
assert(deconv.training == relu.training and bn.training is False), "Conv and relu both must be in the same mode (train or eval) and bn must be eval."
138+
assert deconv.training == relu.training and bn.training is False, "Conv and relu both must be in the same mode (train or eval) and bn must be eval."
139139

140140
if deconv.training:
141141
assert bn.num_features == deconv.out_channels, 'Output channel of ConvTranspose2d must match num_features of BatchNorm2d'

mqbench/mix_precision/mix_precision.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -163,7 +163,7 @@ def hawq(model: Module, data: Tuple, criterion, type='trace'):
163163
elif type == 'trace':
164164
return hessian_comp.layer_trace()
165165
else:
166-
raise(NotImplementedError, "{} is not supported, only trace and eigenvalues.".format(type))
166+
raise NotImplementedError("{} is not supported, only trace and eigenvalues.".format(type))
167167

168168

169169
def mixprecision_bit_selection(bitwidth_list, sensetive_dict, layer_parameters_dict, model_size_constraints, latency_constraints):

requirements.txt

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
torch==1.10.0
22
torchvision==0.11.1
3-
onnx
4-
prettytable
3+
onnx==1.7.0
4+
prettytable

0 commit comments

Comments (0)