We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 0af26b2 commit 94ec97d — Copy full SHA for 94ec97d
modelopt/torch/quantization/nn/modules/tensor_quantizer.py
@@ -959,7 +959,7 @@ def forward(self, inputs):
959
and self.block_sizes.get("type", None) != "dynamic"
960
and self._fake_quant
961
):
962
- # Reshape is required if the logic isnt handled in the simulation kernel
+ # Reshape is required if the logic is not handled in the simulation kernel
963
self._setup_for_blockquant(inputs)
964
setattr(self, "_original_input_shape", inputs.shape)
965
inputs = self._process_for_blockquant(inputs)
0 commit comments