Skip to content

Commit 8994f7b

Browse files
reuvenperetz and reuvenp authored
Add compatibility test for pytorch (#74)
* Add compatibility test for Pytorch to ensure models saved in previous releases can be loaded using new versions. --------- Co-authored-by: reuvenp <reuvenp@altair-semi.com>
1 parent 7b2acf2 commit 8994f7b

14 files changed

+1134
-0
lines changed
Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,60 @@
# Reusable workflow: verifies that models saved with an older MCT Quantizers
# release can still be loaded by the current code on `main`.
name: Run Torch Quantizers Compatibility Tests

on:
  workflow_call:
    inputs:
      save_version:
        description: 'MCT Quantizers version to save models'
        required: true
        type: string
      python_version:
        description: 'Python version'
        # A default is supplied, so the input need not be required
        # (required: true would make the default dead).
        required: false
        type: string
        default: '3.10.*'
      torch_version:
        description: 'Torch version'
        required: false
        type: string
        default: '2.0.*'

jobs:
  run-torch-tests:
    runs-on: ubuntu-latest
    steps:
      # Full history is required so older release tags can be checked out.
      # checkout@v2 / setup-python@v1 run on the removed Node12 runtime;
      # pin the current major versions instead.
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install Python 3
        uses: actions/setup-python@v5
        with:
          python-version: ${{ inputs.python_version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install torch==${{ inputs.torch_version }} onnx onnxruntime onnxruntime-extensions
      - name: Checkout to MCT Quantizers requested tag for saving test models
        run: |
          git checkout tags/${{ inputs.save_version }}
      - name: Run save model tests
        run: |
          # Make the tests package importable from the repository root
          # (replaces the former cd tests / export / cd .. dance).
          export PYTHONPATH="$PWD/tests:${PYTHONPATH}"
          echo "Updated PYTHONPATH: $PYTHONPATH"
          python tests/compatibility_tests/torch_comp_tests/compatibility_weights_save_model_test_suite.py ${{ inputs.save_version }}
          python tests/compatibility_tests/torch_comp_tests/compatibility_activation_save_model_test_suite.py ${{ inputs.save_version }}
      - name: Checkout to MCT Quantizers latest version
        run: |
          git checkout main
      - name: Run load model tests with latest version
        run: |
          export PYTHONPATH="$PWD/tests:${PYTHONPATH}"
          echo "Updated PYTHONPATH: $PYTHONPATH"
          python tests/compatibility_tests/torch_comp_tests/compatibility_weights_load_model_test_suite.py ${{ inputs.save_version }}
          python tests/compatibility_tests/torch_comp_tests/compatibility_activation_load_model_test_suite.py ${{ inputs.save_version }}
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
# Manually-triggered wrapper around the reusable compatibility workflow:
# checks models saved with MCT Quantizers v1.4.0 against PyTorch 2.0.
name: Run Backward Compatibility Test - Pytorch 2.0 MCTQ v1.4.0
on:
  workflow_dispatch: # Allow manual triggers

jobs:
  run-comp-torch-2_0-v1_4:
    uses: ./.github/workflows/compatibility_torch_tests.yml
    with:
      save_version: "v1.4.0"
      python_version: "3.10"
      torch_version: "2.0.*"
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
# Manually-triggered wrapper around the reusable compatibility workflow:
# checks models saved with MCT Quantizers v1.4.0 against PyTorch 2.1.
name: Run Backward Compatibility Test - Pytorch 2.1 MCTQ v1.4.0
on:
  workflow_dispatch: # Allow manual triggers

jobs:
  run-comp-torch-2_1-v1_4:
    uses: ./.github/workflows/compatibility_torch_tests.yml
    with:
      save_version: "v1.4.0"
      python_version: "3.10"
      torch_version: "2.1.*"
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
# ==============================================================================
Lines changed: 172 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,172 @@
1+
# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
# ==============================================================================
15+
import os
16+
import unittest
17+
import torch
18+
from mct_quantizers import get_ort_session_options
19+
import onnxruntime as ort
20+
21+
from mct_quantizers import PytorchActivationQuantizationHolder
22+
from mct_quantizers.pytorch.quantizers import ActivationPOTInferableQuantizer, ActivationSymmetricInferableQuantizer, \
23+
ActivationUniformInferableQuantizer, ActivationLutPOTInferableQuantizer
# Short names of the torch layers/ops under test, used when composing the
# saved-model file names (e.g. "<version>_relu_pot.onnx").
# NOTE(review): torch.add / torch.mul are functions, not Module classes —
# presumably used only via the `layer_type` argument of activation_test.
LAYER2NAME = {
    torch.nn.ReLU: 'relu',
    torch.nn.LeakyReLU: 'leaky_relu',
    torch.add: 'add',
    torch.nn.SiLU: 'swish',
    torch.mul: 'mul',
}

# File-name suffix for each inferable activation quantizer class.
QUANTIZER2NAME = {
    ActivationPOTInferableQuantizer: 'pot',
    ActivationSymmetricInferableQuantizer: 'sym',
    ActivationUniformInferableQuantizer: 'unf',
    ActivationLutPOTInferableQuantizer: 'pot_lut',
}

# Constructor arguments used to instantiate each quantizer type in the tests.
# POT and Symmetric quantizers share one argument set (dict.fromkeys).
QUANTIZER2ARGS = {
    **dict.fromkeys(
        [ActivationPOTInferableQuantizer, ActivationSymmetricInferableQuantizer],
        {'num_bits': 4,
         'threshold': [0.5],
         'signed': True}),
    ActivationUniformInferableQuantizer: {
        'num_bits': 4,
        'min_range': [-2.0],
        'max_range': [3.0]},
    ActivationLutPOTInferableQuantizer: {
        'num_bits': 4,
        'threshold': [0.5],
        'signed': True,
        'lut_values': [22.0, -53.0, 62.0, 0.0, -66.0, -21.0, 44.0, -40.0],
        'lut_values_bitwidth': 8,
        'eps': 1e-8},
}
def _build_model_with_quantization_holder(act_layer, quant_activation_holder, input_shape, model_name):
57+
class Model(torch.nn.Module):
58+
def __init__(self):
59+
super(Model, self).__init__()
60+
self.conv = torch.nn.Conv2d(in_channels=3, out_channels=3, kernel_size=4)
61+
self.act_layer = act_layer
62+
self.quant_activation_holder = quant_activation_holder
63+
64+
def forward(self, inp):
65+
z = self.conv(inp)
66+
y = self.act_layer(z)
67+
x = self.quant_activation_holder(y)
68+
return x, y
69+
70+
return Model()
71+
72+
73+
def _build_model_with_operator_quantization_holder(act_layer, quant_activation_holder, input_shape, model_name):
74+
class Model(torch.nn.Module):
75+
def __init__(self):
76+
super(Model, self).__init__()
77+
self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=3, kernel_size=4)
78+
self.conv2 = torch.nn.Conv2d(in_channels=3, out_channels=3, kernel_size=4)
79+
self.act_layer = act_layer
80+
self.quant_activation_holder = quant_activation_holder
81+
82+
def forward(self, inp):
83+
z1 = self.conv1(inp)
84+
z2 = self.conv2(inp)
85+
y = self.act_layer(z1,z2)
86+
x = self.quant_activation_holder(y)
87+
return x, y
88+
89+
return Model()
90+
91+
class BaseActivationQuantizerBuildAndSaveTest(unittest.TestCase):
    """Base test: build a model with an activation quantization holder, check
    that the holder actually changes the activation output, and export the
    model to ONNX under a version-tagged file name."""

    # Prefer GPU when available; models and inputs are moved to this device.
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    # MCT Quantizers version string used as the saved-file-name prefix;
    # must be set by the test runner before any test executes.
    VERSION = None

    def build_and_save_model(self, quantizer, quantizer_params, layer, model_name, input_shape, is_op=False):
        """Instantiate the quantizer, wrap it in a holder model, verify
        quantization takes effect, and export the model to ONNX."""
        assert BaseActivationQuantizerBuildAndSaveTest.VERSION is not None

        act_quantizer = quantizer(**quantizer_params)
        act_quantizer.enable_custom_impl()
        quant_act_holder = PytorchActivationQuantizationHolder(activation_holder_quantizer=act_quantizer)

        # Binary ops (add/mul) need the two-branch model variant.
        builder = (_build_model_with_operator_quantization_holder if is_op
                   else _build_model_with_quantization_holder)
        model = builder(act_layer=layer,
                        quant_activation_holder=quant_act_holder,
                        input_shape=input_shape,
                        model_name=model_name)

        holders = [m for _, m in model.named_modules()
                   if isinstance(m, PytorchActivationQuantizationHolder)]
        self.assertEqual(len(holders), 1)

        sample = torch.rand(1, *input_shape).to(BaseActivationQuantizerBuildAndSaveTest.device)
        model = model.to(BaseActivationQuantizerBuildAndSaveTest.device)

        # Verifying activation quantization after holder
        output = model(sample)
        self.assertTrue(torch.any(output[0] != output[1]), "Expecting activation layer output to be different "
                                                           "from the activation holder layer output, which should be "
                                                           "quantized.")

        file_path = f'{model_name}.onnx'
        torch.onnx.export(model,
                          sample,
                          file_path,
                          opset_version=16,
                          verbose=False,
                          input_names=['input'],
                          output_names=['output'],
                          dynamic_axes={'input': {0: 'batch_size'},
                                        'output': {0: 'batch_size'}})

    def activation_test(self, quantizer, layer, is_op=False, layer_type=None):
        """Convenience wrapper: look up quantizer args and naming, then build
        and save the model (layer_type overrides layer for the file name)."""
        self.build_and_save_model(quantizer=quantizer,
                                  quantizer_params=QUANTIZER2ARGS[quantizer],
                                  layer=layer(),
                                  model_name=f"{BaseActivationQuantizerBuildAndSaveTest.VERSION}_"
                                             f"{LAYER2NAME[layer_type if layer_type is not None else layer]}_"
                                             f"{QUANTIZER2NAME[quantizer]}",
                                  input_shape=(3, 8, 8),
                                  is_op=is_op)
147+
148+
149+
class BaseActivationQuantizerLoadAndCompareTest(unittest.TestCase):
    """Base test: load a previously exported ONNX model with the current
    release and verify its quantizer can be reconstructed, then delete it."""

    # Version the models were saved with; set by the test runner before use.
    SAVED_VERSION = None

    def load_and_compare_model(self, quantizer_type, layer_type):
        """Open the saved ONNX model, validate quantizer init, remove file."""
        assert BaseActivationQuantizerLoadAndCompareTest.SAVED_VERSION is not None

        model_path = (f"{BaseActivationQuantizerLoadAndCompareTest.SAVED_VERSION}_"
                      f"{LAYER2NAME[layer_type]}_"
                      f"{QUANTIZER2NAME[quantizer_type]}.onnx")

        # Creating the session is itself a check: it fails if the custom
        # quantizer ops cannot be resolved by the current release.
        ort.InferenceSession(model_path,
                             get_ort_session_options(),
                             providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])

        self._check_quantizer_init_from_onnx_model(model_path)
        os.remove(model_path)

    def _check_quantizer_init_from_onnx_model(self, filepath):
        """Subclasses must override to rebuild the quantizer from the model.

        Fix: the original `raise NotImplemented` raised a TypeError, because
        NotImplemented is a sentinel value, not an exception class.
        """
        raise NotImplementedError

    def activation_test(self, quantizer_type, layer):
        self.load_and_compare_model(quantizer_type=quantizer_type,
                                    layer_type=layer)
172+

0 commit comments

Comments
 (0)