@@ -13,10 +13,8 @@
13 | 13 | # limitations under the License. |
14 | 14 |
15 | 15 | import sys |
16 | | -import tempfile |
17 | 16 | import unittest |
18 | 17 |
19 | | -import numpy as np |
20 | 18 | import torch |
21 | 19 | from parameterized import parameterized |
22 | 20 | from transformers import AutoTokenizer, GlmModel |
@@ -27,7 +25,6 @@
27 | 25 | require_peft_backend, |
28 | 26 | require_torch_accelerator, |
29 | 27 | skip_mps, |
30 | | - torch_device, |
31 | 28 | ) |
32 | 29 |
33 | 30 |
@@ -119,35 +116,6 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self): |
119 | 116 | def test_simple_inference_with_text_denoiser_lora_unfused(self): |
120 | 117 | super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) |
121 | 118 |
122 | | - def test_simple_inference_save_pretrained(self): |
123 | | - """ |
124 | | - Tests a simple usecase where users could use saving utilities for LoRA through save_pretrained |
125 | | - """ |
126 | | - for scheduler_cls in self.scheduler_classes: |
127 | | - components, _, _ = self.get_dummy_components(scheduler_cls) |
128 | | - pipe = self.pipeline_class(**components) |
129 | | - pipe = pipe.to(torch_device) |
130 | | - pipe.set_progress_bar_config(disable=None) |
131 | | - _, _, inputs = self.get_dummy_inputs(with_generator=False) |
132 | | - |
133 | | - output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] |
134 | | - self.assertTrue(output_no_lora.shape == self.output_shape) |
135 | | - |
136 | | - images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] |
137 | | - |
138 | | - with tempfile.TemporaryDirectory() as tmpdirname: |
139 | | - pipe.save_pretrained(tmpdirname) |
140 | | - |
141 | | - pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname) |
142 | | - pipe_from_pretrained.to(torch_device) |
143 | | - |
144 | | - images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0] |
145 | | - |
146 | | - self.assertTrue( |
147 | | - np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3), |
148 | | - "Loading from saved checkpoints should give same results.", |
149 | | - ) |
150 | | - |
151 | 119 | @parameterized.expand([("block_level", True), ("leaf_level", False)]) |
152 | 120 | @require_torch_accelerator |
153 | 121 | def test_group_offloading_inference_denoiser(self, offload_type, use_stream): |