We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 6cf9a78 commit 0736f87 — Copy full SHA for 0736f87
tests/quantization/quanto/test_quanto.py
@@ -309,7 +309,6 @@ def test_training(self):
309
for module in quantized_model.modules():
310
if isinstance(module, LoRALayer):
311
self.assertTrue(module.adapter[1].weight.grad is not None)
312
- self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
313
314
315
class FluxTransformerFloat8WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase):
0 commit comments