We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 5704376 commit ac1a6ec — Copy full SHA for ac1a6ec
tests/lora/test_lora_layers_flux.py
@@ -167,7 +167,7 @@ def test_modify_padding_mode(self):
167
@slow
168
@require_torch_gpu
169
@require_peft_backend
170
-# @unittest.skip("We cannot run inference on this model with the current CI hardware")
+@unittest.skip("We cannot run inference on this model with the current CI hardware")
171
# TODO (DN6, sayakpaul): move these tests to a beefier GPU
172
class FluxLoRAIntegrationTests(unittest.TestCase):
173
"""internal note: The integration slices were obtained on audace.
0 commit comments