
Commit 27fe7c5

update
1 parent db03fc9 commit 27fe7c5

3 files changed: +0 -51 lines changed

tests/lora/test_lora_layers_cogview4.py

Lines changed: 0 additions & 32 deletions

@@ -13,10 +13,8 @@
 # limitations under the License.

 import sys
-import tempfile
 import unittest

-import numpy as np
 import torch
 from parameterized import parameterized
 from transformers import AutoTokenizer, GlmModel
@@ -27,7 +25,6 @@
     require_peft_backend,
     require_torch_accelerator,
     skip_mps,
-    torch_device,
 )


@@ -119,35 +116,6 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
     def test_simple_inference_with_text_denoiser_lora_unfused(self):
         super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

-    def test_simple_inference_save_pretrained(self):
-        """
-        Tests a simple usecase where users could use saving utilities for LoRA through save_pretrained
-        """
-        for scheduler_cls in self.scheduler_classes:
-            components, _, _ = self.get_dummy_components(scheduler_cls)
-            pipe = self.pipeline_class(**components)
-            pipe = pipe.to(torch_device)
-            pipe.set_progress_bar_config(disable=None)
-            _, _, inputs = self.get_dummy_inputs(with_generator=False)
-
-            output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-            self.assertTrue(output_no_lora.shape == self.output_shape)
-
-            images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-
-            with tempfile.TemporaryDirectory() as tmpdirname:
-                pipe.save_pretrained(tmpdirname)
-
-                pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname)
-                pipe_from_pretrained.to(torch_device)
-
-                images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0]
-
-                self.assertTrue(
-                    np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3),
-                    "Loading from saved checkpoints should give same results.",
-                )
-
     @parameterized.expand([("block_level", True), ("leaf_level", False)])
     @require_torch_accelerator
     def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
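
For context, the test removed above exercised the save_pretrained round trip: serialize a pipeline to disk, reload it, and check that generation with the same seed produces the same images. A minimal standalone sketch of that core round trip follows (without the LoRA setup the full test harness adds; the checkpoint id and inputs are placeholders, not from this commit):

import tempfile

import numpy as np
import torch
from diffusers import DiffusionPipeline

# "some-org/some-pipeline" is a hypothetical checkpoint id -- substitute a real one.
pipe = DiffusionPipeline.from_pretrained("some-org/some-pipeline")
inputs = {"prompt": "a photo of a cat", "num_inference_steps": 2, "output_type": "np"}

images = pipe(**inputs, generator=torch.manual_seed(0)).images

with tempfile.TemporaryDirectory() as tmpdirname:
    pipe.save_pretrained(tmpdirname)  # serialize every pipeline component
    pipe_from_pretrained = DiffusionPipeline.from_pretrained(tmpdirname)

images_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0)).images

# The same seed before and after the round trip should give (near-)identical images.
assert np.allclose(images, images_save_pretrained, atol=1e-3, rtol=1e-3)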

tests/lora/test_lora_layers_hunyuanvideo.py

Lines changed: 0 additions & 5 deletions

@@ -156,11 +156,6 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
     def test_simple_inference_with_text_denoiser_lora_unfused(self):
         super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

-    # TODO(aryan): Fix the following test
-    @unittest.skip("This test fails with an error I haven't been able to debug yet.")
-    def test_simple_inference_save_pretrained(self):
-        pass
-
     @unittest.skip("Not supported in HunyuanVideo.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass

tests/lora/utils.py

Lines changed: 0 additions & 14 deletions

@@ -329,20 +329,6 @@ def add_adapters_to_pipeline(self, pipe, text_lora_config=None, denoiser_lora_co
         )
         return pipe, denoiser

-    def test_simple_inference(self):
-        """
-        Tests a simple inference and makes sure it works as expected
-        """
-        for scheduler_cls in self.scheduler_classes:
-            components, text_lora_config, _ = self.get_dummy_components(scheduler_cls)
-            pipe = self.pipeline_class(**components)
-            pipe = pipe.to(torch_device)
-            pipe.set_progress_bar_config(disable=None)
-
-            _, _, inputs = self.get_dummy_inputs()
-            output_no_lora = pipe(**inputs)[0]
-            self.assertTrue(output_no_lora.shape == self.output_shape)
-
     @require_peft_version_greater("0.13.1")
     def test_low_cpu_mem_usage_with_injection(self):
         """Tests if we can inject LoRA state dict with low_cpu_mem_usage."""
