@@ -18,6 +18,7 @@
 import torch
 
 from diffusers.models import ModelMixin
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline
 from diffusers.utils.testing_utils import require_torch_gpu, torch_device
 
 
@@ -56,6 +57,20 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         return x
 
 
+class DummyPipeline(DiffusionPipeline):
+    model_cpu_offload_seq = "model"
+
+    def __init__(self, model: torch.nn.Module) -> None:
+        super().__init__()
+
+        self.register_modules(model=model)
+
+    def __call__(self, x: torch.Tensor) -> torch.Tensor:
+        for _ in range(2):
+            x = x + 0.1 * self.model(x)
+        return x
+
+
 @require_torch_gpu
 class GroupOffloadTests(unittest.TestCase):
     in_features = 64
@@ -151,3 +166,27 @@ def test_error_raised_if_supports_group_offloading_false(self):
         self.model._supports_group_offloading = False
         with self.assertRaisesRegex(ValueError, "does not support group offloading"):
             self.model.enable_group_offload(onload_device=torch.device("cuda"))
+
+    def test_error_raised_if_model_offloading_applied_on_group_offloaded_module(self):
+        pipe = DummyPipeline(self.model)
+        pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
+        with self.assertRaisesRegex(ValueError, "You are trying to apply model/sequential CPU offloading"):
+            pipe.enable_model_cpu_offload()
+
+    def test_error_raised_if_sequential_offloading_applied_on_group_offloaded_module(self):
+        pipe = DummyPipeline(self.model)
+        pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
+        with self.assertRaisesRegex(ValueError, "You are trying to apply model/sequential CPU offloading"):
+            pipe.enable_sequential_cpu_offload()
+
+    def test_error_raised_if_group_offloading_applied_on_model_offloaded_module(self):
+        pipe = DummyPipeline(self.model)
+        pipe.enable_model_cpu_offload()
+        with self.assertRaisesRegex(ValueError, "Cannot apply group offloading"):
+            pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
+
+    def test_error_raised_if_group_offloading_applied_on_sequential_offloaded_module(self):
+        pipe = DummyPipeline(self.model)
+        pipe.enable_sequential_cpu_offload()
+        with self.assertRaisesRegex(ValueError, "Cannot apply group offloading"):
+            pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
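For context, a minimal sketch of the mutual exclusion these new tests assert, reusing the `DummyPipeline` added above. It assumes `model` is a `ModelMixin`-based module like the dummy model defined earlier in this test file and that a CUDA device is available; the block size here is illustrative.

    # Hedged sketch, not part of the diff: `model` is assumed to be a
    # ModelMixin-based dummy model such as the one defined earlier in this file.
    pipe = DummyPipeline(model)
    pipe.model.enable_group_offload(
        torch.device("cuda"), offload_type="block_level", num_blocks_per_group=3
    )
    try:
        # Mixing offloading strategies on the same module is rejected.
        pipe.enable_model_cpu_offload()
    except ValueError as err:
        print(err)  # message mentions "model/sequential CPU offloading"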