Commit 9bec383

Author: Joey Tsai
Commit message: [Fix lint]
1 parent: 03836c8

File tree: 4 files changed, +11 −5 lines

backends/qualcomm/quantizer/quantizer.py

Lines changed: 1 addition & 1 deletion
@@ -26,9 +26,9 @@
     get_16a4w_qnn_ptq_config,
     get_16a8w_qnn_ptq_config,
     get_default_16bit_qnn_ptq_config,
+    get_default_8bit_qat_proto,
     get_default_8bit_qnn_ptq_config,
     get_ptq_per_channel_quant_config,
-    get_default_8bit_qat_proto,
     OP_ANNOTATOR,
     QuantizationConfig,
 )

backends/qualcomm/quantizer/utils.py

Lines changed: 4 additions & 2 deletions
@@ -25,9 +25,9 @@
     FixedQParamsObserver,
     MinMaxObserver,
     MovingAverageMinMaxObserver,
+    MovingAveragePerChannelMinMaxObserver,
     PerChannelMinMaxObserver,
     UniformQuantizationObserverBase,
-    MovingAveragePerChannelMinMaxObserver,
 )

 from torch.ao.quantization.quantizer import (
@@ -203,7 +203,9 @@ def get_default_8bit_qat_proto(act_symmetric: bool = False) -> QuantizationConfig:
         quant_max=torch.iinfo(torch.int8).max,
         qscheme=torch.per_tensor_symmetric,
         ch_axis=0,
-        observer_or_fake_quant_ctr=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver),
+        observer_or_fake_quant_ctr=FusedMovingAvgObsFakeQuantize.with_args(
+            observer=MovingAverageMinMaxObserver
+        ),
     )

     bias_quantization_spec = QuantizationSpec(
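For context, the object being reformatted in the hunk above is a PT2E QuantizationSpec whose fake-quant constructor is built with FusedMovingAvgObsFakeQuantize.with_args. A minimal standalone sketch of such a spec follows; only quant_max, qscheme, ch_axis, and the observer constructor mirror the hunk, while dtype and quant_min are illustrative assumptions not taken from this commit.

# Minimal sketch of a QAT spec using a fused fake-quant observer constructor.
# dtype and quant_min below are assumed for illustration only.
import torch
from torch.ao.quantization.fake_quantize import FusedMovingAvgObsFakeQuantize
from torch.ao.quantization.observer import MovingAverageMinMaxObserver
from torch.ao.quantization.quantizer import QuantizationSpec

weight_quantization_spec = QuantizationSpec(
    dtype=torch.int8,                           # assumed
    quant_min=torch.iinfo(torch.int8).min + 1,  # assumed
    quant_max=torch.iinfo(torch.int8).max,
    qscheme=torch.per_tensor_symmetric,
    ch_axis=0,
    observer_or_fake_quant_ctr=FusedMovingAvgObsFakeQuantize.with_args(
        observer=MovingAverageMinMaxObserver
    ),
)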

backends/qualcomm/tests/test_qnn_delegate.py

Lines changed: 1 addition & 1 deletion
@@ -1054,7 +1054,7 @@ def test_qnn_backend_linear_qat(self):

         module = self.get_prepared_qat_module(module, sample_input)

-        optimizer = torch.optim.SGD(module.parameters(), lr = 0.1)
+        optimizer = torch.optim.SGD(module.parameters(), lr=0.1)
         criterion = torch.nn.CrossEntropyLoss()
         output = module(*sample_input)
         loss = criterion(output, module(*sample_input))

backends/qualcomm/tests/utils.py

Lines changed: 5 additions & 1 deletion
@@ -45,7 +45,11 @@
 from executorch.exir.pass_base import ExportPass
 from executorch.exir.passes.memory_planning_pass import MemoryPlanningPass
 from executorch.exir.program import ExecutorchProgram, ExecutorchProgramManager
-from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e, prepare_qat_pt2e
+from torch.ao.quantization.quantize_pt2e import (
+    convert_pt2e,
+    prepare_pt2e,
+    prepare_qat_pt2e,
+)


 def generate_context_binary(
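The prepare_qat_pt2e and convert_pt2e imports reorganized above are what the linear QAT test in test_qnn_delegate.py builds on. A minimal sketch of that PT2E QAT flow follows; it assumes torch.export.export_for_training for graph capture and PyTorch's XNNPACKQuantizer as a stand-in quantizer, neither of which comes from this commit or the Qualcomm backend.

# Minimal sketch of the PT2E QAT flow: capture -> prepare -> train -> convert.
# The quantizer and capture API here are illustrative stand-ins, not the
# Qualcomm quantizer used in backends/qualcomm.
import torch
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_qat_pt2e
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
sample_input = (torch.randn(1, 4),)

# Capture the eager model as a graph module, then insert fake-quant nodes.
exported = torch.export.export_for_training(model, sample_input).module()
quantizer = XNNPACKQuantizer().set_global(
    get_symmetric_quantization_config(is_qat=True)
)
prepared = prepare_qat_pt2e(exported, quantizer)

# Short fine-tuning loop, analogous to the SGD step in test_qnn_backend_linear_qat.
optimizer = torch.optim.SGD(prepared.parameters(), lr=0.1)
criterion = torch.nn.MSELoss()
for _ in range(3):
    optimizer.zero_grad()
    loss = criterion(prepared(*sample_input), torch.zeros(1, 4))
    loss.backward()
    optimizer.step()

quantized = convert_pt2e(prepared)  # fold fake-quant into quantize/dequantize ops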
