|  | 
| 75 | 75 |     nightly, | 
| 76 | 76 |     require_compel, | 
| 77 | 77 |     require_flax, | 
|  | 78 | +    require_hf_hub_version_greater, | 
| 78 | 79 |     require_onnxruntime, | 
| 79 | 80 |     require_torch_2, | 
| 80 | 81 |     require_torch_gpu, | 
|  | 82 | +    require_transformers_version_greater, | 
| 81 | 83 |     run_test_in_subprocess, | 
| 82 | 84 |     slow, | 
| 83 | 85 |     torch_device, | 
| @@ -1802,6 +1804,25 @@ def test_pipe_same_device_id_offload(self): | 
| 1802 | 1804 |         sd.maybe_free_model_hooks() | 
| 1803 | 1805 |         assert sd._offload_gpu_id == 5 | 
| 1804 | 1806 | 
 | 
    @require_hf_hub_version_greater("0.26.5")
    @require_transformers_version_greater("4.47.0")
    @parameterized.expand([torch.float32, torch.float16])
    def test_load_dduf_from_hub(self, dtype):
        """Round-trip test for DDUF pipeline loading.

        Loads a tiny Flux pipeline from a `.dduf` archive on the Hub, runs a short
        inference, then saves the pipeline to disk, reloads it the conventional way,
        and checks both pipelines produce (near-)identical images.

        Parametrized over fp32 and fp16 via ``parameterized.expand``; gated on
        hf_hub >= 0.26.5 and transformers >= 4.47.0 (DDUF support landed there —
        hence the ``require_*`` decorators).

        NOTE: performs a network download from the Hub; not runnable offline.
        """
        with tempfile.TemporaryDirectory() as tmpdir:
            # Load directly from the DDUF archive; tmpdir doubles as the HF cache dir
            # so nothing leaks into the user's real cache.
            pipe = DiffusionPipeline.from_pretrained(
                "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", cache_dir=tmpdir, torch_dtype=dtype
            ).to(torch_device)
            # Fixed seed so the two runs below are comparable.
            out_1 = pipe(prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np").images

            # Save into the same tmpdir used as cache_dir, then reload without the
            # dduf_file argument — exercises the non-DDUF load path on the saved copy.
            # NOTE(review): reusing the cache dir as the save target appears intentional
            # (both are cleaned up together) — confirm there is no layout collision.
            pipe.save_pretrained(tmpdir)
            loaded_pipe = DiffusionPipeline.from_pretrained(tmpdir, torch_dtype=dtype).to(torch_device)

            out_2 = loaded_pipe(
                prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np"
            ).images

        # Same seed + same weights => outputs should match within a small tolerance.
        self.assertTrue(np.allclose(out_1, out_2, atol=1e-4, rtol=1e-4))

| 1805 | 1826 | 
 | 
| 1806 | 1827 | @slow | 
| 1807 | 1828 | @require_torch_gpu | 
|  | 
0 commit comments