diff --git a/src/diffusers/loaders/single_file_utils.py b/src/diffusers/loaders/single_file_utils.py
index d620c15e8377..236fbd0c2295 100644
--- a/src/diffusers/loaders/single_file_utils.py
+++ b/src/diffusers/loaders/single_file_utils.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 """Conversion script for the Stable Diffusion checkpoints."""

+import copy
 import os
 import re
 from contextlib import nullcontext
@@ -91,11 +92,11 @@
     "xl_inpaint": {"pretrained_model_name_or_path": "diffusers/stable-diffusion-xl-1.0-inpainting-0.1"},
     "playground-v2-5": {"pretrained_model_name_or_path": "playgroundai/playground-v2.5-1024px-aesthetic"},
     "upscale": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-x4-upscaler"},
-    "inpainting": {"pretrained_model_name_or_path": "Lykon/dreamshaper-8-inpainting"},
+    "inpainting": {"pretrained_model_name_or_path": "stable-diffusion-v1-5/stable-diffusion-inpainting"},
     "inpainting_v2": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-2-inpainting"},
     "controlnet": {"pretrained_model_name_or_path": "lllyasviel/control_v11p_sd15_canny"},
     "v2": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-2-1"},
-    "v1": {"pretrained_model_name_or_path": "Lykon/dreamshaper-8"},
+    "v1": {"pretrained_model_name_or_path": "stable-diffusion-v1-5/stable-diffusion-v1-5"},
     "stable_cascade_stage_b": {"pretrained_model_name_or_path": "stabilityai/stable-cascade", "subfolder": "decoder"},
     "stable_cascade_stage_b_lite": {
         "pretrained_model_name_or_path": "stabilityai/stable-cascade",
@@ -541,6 +542,7 @@ def infer_diffusers_model_type(checkpoint):
 def fetch_diffusers_config(checkpoint):
     model_type = infer_diffusers_model_type(checkpoint)
     model_path = DIFFUSERS_DEFAULT_PIPELINE_PATHS[model_type]
+    model_path = copy.deepcopy(model_path)

     return model_path

diff --git a/tests/lora/test_lora_layers_sd.py b/tests/lora/test_lora_layers_sd.py
index 5d79bb0c50bc..50187e50a912 100644
--- a/tests/lora/test_lora_layers_sd.py
+++ b/tests/lora/test_lora_layers_sd.py
@@ -102,7 +102,7 @@ def tearDown(self):
     @slow
     @require_torch_gpu
     def test_integration_move_lora_cpu(self):
-        path = "Jiali/stable-diffusion-1.5"
+        path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
         lora_id = "takuma104/lora-test-text-encoder-lora-target"

         pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
@@ -161,7 +161,7 @@ def test_integration_move_lora_cpu(self):
     def test_integration_move_lora_dora_cpu(self):
         from peft import LoraConfig

-        path = "Jiali/stable-diffusion-1.5"
+        path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
         unet_lora_config = LoraConfig(
             init_lora_weights="gaussian",
             target_modules=["to_k", "to_q", "to_v", "to_out.0"],
@@ -221,7 +221,7 @@ def tearDown(self):
         torch.cuda.empty_cache()

     def test_integration_logits_with_scale(self):
-        path = "Jiali/stable-diffusion-1.5"
+        path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
         lora_id = "takuma104/lora-test-text-encoder-lora-target"

         pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32)
@@ -253,7 +253,7 @@ def test_integration_logits_with_scale(self):
         release_memory(pipe)

     def test_integration_logits_no_scale(self):
-        path = "Jiali/stable-diffusion-1.5"
+        path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
         lora_id = "takuma104/lora-test-text-encoder-lora-target"

         pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32)
@@ -284,7 +284,7 @@ def test_dreambooth_old_format(self):

         lora_model_id = "hf-internal-testing/lora_dreambooth_dog_example"

-        base_model_id = "Jiali/stable-diffusion-1.5"
+        base_model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

         pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
         pipe = pipe.to(torch_device)
@@ -308,7 +308,7 @@ def test_dreambooth_text_encoder_new_format(self):

         lora_model_id = "hf-internal-testing/lora-trained"

-        base_model_id = "Jiali/stable-diffusion-1.5"
+        base_model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

         pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
         pipe = pipe.to(torch_device)
@@ -419,9 +419,9 @@ def test_a1111_with_sequential_cpu_offload(self):
     def test_kohya_sd_v15_with_higher_dimensions(self):
         generator = torch.Generator().manual_seed(0)

-        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None).to(
-            torch_device
-        )
+        pipe = StableDiffusionPipeline.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
+        ).to(torch_device)
         lora_model_id = "hf-internal-testing/urushisato-lora"
         lora_filename = "urushisato_v15.safetensors"
         pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
@@ -444,7 +444,7 @@ def test_vanilla_funetuning(self):

         lora_model_id = "hf-internal-testing/sd-model-finetuned-lora-t4"

-        base_model_id = "Jiali/stable-diffusion-1.5"
+        base_model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

         pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
         pipe = pipe.to(torch_device)
@@ -467,9 +467,9 @@ def test_unload_kohya_lora(self):
         prompt = "masterpiece, best quality, mountain"
         num_inference_steps = 2

-        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None).to(
-            torch_device
-        )
+        pipe = StableDiffusionPipeline.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
+        ).to(torch_device)
         initial_images = pipe(
             prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
         ).images
@@ -505,9 +505,9 @@ def test_load_unload_load_kohya_lora(self):
         prompt = "masterpiece, best quality, mountain"
         num_inference_steps = 2

-        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None).to(
-            torch_device
-        )
+        pipe = StableDiffusionPipeline.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
+        ).to(torch_device)
         initial_images = pipe(
             prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
         ).images
@@ -547,9 +547,9 @@ def test_load_unload_load_kohya_lora(self):

     def test_not_empty_state_dict(self):
         # Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again
-        pipe = AutoPipelineForText2Image.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16).to(
-            torch_device
-        )
+        pipe = AutoPipelineForText2Image.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
+        ).to(torch_device)
         pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

         cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors")
@@ -561,9 +561,9 @@ def test_load_unload_load_state_dict(self):
         # Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again
-        pipe = AutoPipelineForText2Image.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16).to(
-            torch_device
-        )
+        pipe = AutoPipelineForText2Image.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
+        ).to(torch_device)
         pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

         cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors")
@@ -580,7 +580,9 @@ def test_load_unload_load_state_dict(self):
         release_memory(pipe)

     def test_sdv1_5_lcm_lora(self):
-        pipe = DiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16)
+        pipe = DiffusionPipeline.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
+        )
         pipe.to(torch_device)
         pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

@@ -608,7 +610,9 @@ def test_sdv1_5_lcm_lora(self):
         release_memory(pipe)

     def test_sdv1_5_lcm_lora_img2img(self):
-        pipe = AutoPipelineForImage2Image.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16)
+        pipe = AutoPipelineForImage2Image.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
+        )
         pipe.to(torch_device)
         pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

@@ -649,7 +653,7 @@ def test_sd_load_civitai_empty_network_alpha(self):
         This test simply checks that loading a LoRA with an empty network alpha works fine
         See: https://github.com/huggingface/diffusers/issues/5606
         """
-        pipeline = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
+        pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
         pipeline.enable_sequential_cpu_offload()
         civitai_path = hf_hub_download("ybelkada/test-ahi-civitai", "ahi_lora_weights.safetensors")
         pipeline.load_lora_weights(civitai_path, adapter_name="ahri")
diff --git a/tests/models/autoencoders/test_models_vae.py b/tests/models/autoencoders/test_models_vae.py
index cdf441c0990d..0188f9121ae0 100644
--- a/tests/models/autoencoders/test_models_vae.py
+++ b/tests/models/autoencoders/test_models_vae.py
@@ -1051,7 +1051,9 @@ def test_encode_decode(self):

     def test_sd(self):
         vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder")  # TODO - update
-        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", vae=vae, safety_checker=None)
+        pipe = StableDiffusionPipeline.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, safety_checker=None
+        )
         pipe.to(torch_device)

         out = pipe(
@@ -1099,7 +1101,7 @@ def test_sd_f16(self):
             "openai/consistency-decoder", torch_dtype=torch.float16
         )  # TODO - update
         pipe = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5",
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
             torch_dtype=torch.float16,
             vae=vae,
             safety_checker=None,
@@ -1124,7 +1126,7 @@ def test_sd_f16(self):
     def test_vae_tiling(self):
         vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
         pipe = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", vae=vae, safety_checker=None, torch_dtype=torch.float16
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, safety_checker=None, torch_dtype=torch.float16
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
diff --git a/tests/pipelines/controlnet/test_controlnet.py b/tests/pipelines/controlnet/test_controlnet.py
index 302712dbfd0d..b12655d989d4 100644
--- a/tests/pipelines/controlnet/test_controlnet.py
+++ b/tests/pipelines/controlnet/test_controlnet.py
@@ -73,7 +73,7 @@ def _test_stable_diffusion_compile(in_queue, out_queue, timeout):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.to("cuda")
         pipe.set_progress_bar_config(disable=None)
@@ -715,7 +715,7 @@ def test_canny(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
@@ -742,7 +742,7 @@ def test_depth(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
@@ -769,7 +769,7 @@ def test_hed(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-hed")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
@@ -796,7 +796,7 @@ def test_mlsd(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-mlsd")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
@@ -823,7 +823,7 @@ def test_normal(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-normal")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
@@ -850,7 +850,7 @@ def test_openpose(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
@@ -877,7 +877,7 @@ def test_scribble(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-scribble")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
@@ -904,7 +904,7 @@ def test_seg(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
@@ -935,7 +935,7 @@ def test_sequential_cpu_offloading(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing()
@@ -961,7 +961,7 @@ def test_canny_guess_mode(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
@@ -993,7 +993,7 @@ def test_canny_guess_mode_euler(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
         pipe.enable_model_cpu_offload()
@@ -1035,7 +1035,7 @@ def test_v11_shuffle_global_pool_conditions(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_shuffle")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
@@ -1081,7 +1081,9 @@ def test_pose_and_canny(self):
         controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")

         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=[controlnet_pose, controlnet_canny]
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
+            safety_checker=None,
+            controlnet=[controlnet_pose, controlnet_canny],
         )
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
diff --git a/tests/pipelines/controlnet/test_controlnet_img2img.py b/tests/pipelines/controlnet/test_controlnet_img2img.py
index 2b22b3e5a76d..7c4ae716b37d 100644
--- a/tests/pipelines/controlnet/test_controlnet_img2img.py
+++ b/tests/pipelines/controlnet/test_controlnet_img2img.py
@@ -407,7 +407,7 @@ def test_canny(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

         pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
diff --git a/tests/pipelines/controlnet/test_controlnet_inpaint.py b/tests/pipelines/controlnet/test_controlnet_inpaint.py
index eba493c20588..e49106334c2e 100644
--- a/tests/pipelines/controlnet/test_controlnet_inpaint.py
+++ b/tests/pipelines/controlnet/test_controlnet_inpaint.py
@@ -504,7 +504,7 @@ def test_inpaint(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint")

         pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, controlnet=controlnet
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
         )
         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
         pipe.enable_model_cpu_offload()
diff --git a/tests/pipelines/controlnet/test_flax_controlnet.py b/tests/pipelines/controlnet/test_flax_controlnet.py
index 6ab66ccb6139..bf5564e810ef 100644
--- a/tests/pipelines/controlnet/test_flax_controlnet.py
+++ b/tests/pipelines/controlnet/test_flax_controlnet.py
@@ -41,7 +41,7 @@ def test_canny(self):
             "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
         )
         pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
         )
         params["controlnet"] = controlnet_params

@@ -86,7 +86,7 @@ def test_pose(self):
             "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
         )
         pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
         )
         params["controlnet"] = controlnet_params

diff --git a/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py b/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py
index 741639e2d09e..a8180a3bc27f 100644
--- a/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py
+++ b/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py
@@ -170,7 +170,10 @@ class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin):
     def test_text_to_image(self):
         image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
         pipeline = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
+            image_encoder=image_encoder,
+            safety_checker=None,
+            torch_dtype=self.dtype,
         )
         pipeline.to(torch_device)
         pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
@@ -200,7 +203,10 @@ def test_text_to_image(self):
     def test_image_to_image(self):
         image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
         pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
+            image_encoder=image_encoder,
+            safety_checker=None,
+            torch_dtype=self.dtype,
         )
         pipeline.to(torch_device)
         pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
@@ -232,7 +238,10 @@ def test_image_to_image(self):
     def test_inpainting(self):
         image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
         pipeline = StableDiffusionInpaintPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
+            image_encoder=image_encoder,
+            safety_checker=None,
+            torch_dtype=self.dtype,
         )
         pipeline.to(torch_device)
         pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
@@ -260,7 +269,10 @@ def test_inpainting(self):
     def test_text_to_image_model_cpu_offload(self):
         image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
         pipeline = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
+            image_encoder=image_encoder,
+            safety_checker=None,
+            torch_dtype=self.dtype,
         )
         pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
         pipeline.to(torch_device)
@@ -287,7 +299,10 @@ def test_text_to_image_model_cpu_offload(self):
     def test_text_to_image_full_face(self):
         image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
         pipeline = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
+            image_encoder=image_encoder,
+            safety_checker=None,
+            torch_dtype=self.dtype,
         )
         pipeline.to(torch_device)
         pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-full-face_sd15.bin")
@@ -304,7 +319,10 @@ def test_text_to_image_full_face(self):
     def test_unload(self):
         image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
         pipeline = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
+            image_encoder=image_encoder,
+            safety_checker=None,
+            torch_dtype=self.dtype,
         )
         before_processors = [attn_proc.__class__ for attn_proc in pipeline.unet.attn_processors.values()]
         pipeline.to(torch_device)
@@ -323,7 +341,10 @@ def test_unload(self):
     def test_multi(self):
         image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder")
         pipeline = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
+            image_encoder=image_encoder,
+            safety_checker=None,
+            torch_dtype=self.dtype,
         )
         pipeline.to(torch_device)
         pipeline.load_ip_adapter(
@@ -343,7 +364,7 @@ def test_multi(self):
     def test_text_to_image_face_id(self):
         pipeline = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, torch_dtype=self.dtype
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, torch_dtype=self.dtype
         )
         pipeline.to(torch_device)
         pipeline.load_ip_adapter(
diff --git a/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py b/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py
index 12742def67f8..effea2619749 100644
--- a/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py
+++ b/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py
@@ -224,7 +224,7 @@ def setUpClass(cls):
     def test_ledits_pp_editing(self):
         pipe = LEditsPPPipelineStableDiffusion.from_pretrained(
-            "Jiali/stable-diffusion-1.5", safety_checker=None, torch_dtype=torch.float16
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, torch_dtype=torch.float16
         )
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
diff --git a/tests/pipelines/pag/test_pag_sd.py b/tests/pipelines/pag/test_pag_sd.py
index 6a644e02f5e8..3979bb170e0b 100644
--- a/tests/pipelines/pag/test_pag_sd.py
+++ b/tests/pipelines/pag/test_pag_sd.py
@@ -283,7 +283,7 @@ def test_pag_inference(self):
 @require_torch_gpu
 class StableDiffusionPAGPipelineIntegrationTests(unittest.TestCase):
     pipeline_class = StableDiffusionPAGPipeline
-    repo_id = "Jiali/stable-diffusion-1.5"
+    repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

     def setUp(self):
         super().setUp()
diff --git a/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py b/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
index 3a9d3815e72d..990c389a9c5f 100644
--- a/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
+++ b/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
@@ -287,7 +287,7 @@ def tearDown(self):
     def test_positive_guidance(self):
         torch_device = "cuda"
-        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
+        pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

@@ -370,7 +370,7 @@ def test_positive_guidance(self):
     def test_negative_guidance(self):
         torch_device = "cuda"
-        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
+        pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

@@ -453,7 +453,7 @@ def test_negative_guidance(self):
     def test_multi_cond_guidance(self):
         torch_device = "cuda"
-        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
+        pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

@@ -536,7 +536,9 @@ def test_multi_cond_guidance(self):
     def test_guidance_fp16(self):
         torch_device = "cuda"
-        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16)
+        pipe = StableDiffusionPipeline.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
+        )
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

diff --git a/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py
index 24ea6e07280e..f7036dee47f0 100644
--- a/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py
+++ b/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py
@@ -250,10 +250,10 @@ def test_inference_default_pndm(self):

     def test_inference_ddim(self):
         ddim_scheduler = DDIMScheduler.from_pretrained(
-            "Jiali/stable-diffusion-1.5", subfolder="scheduler", revision="onnx"
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
         )
         sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5",
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
             revision="onnx",
             scheduler=ddim_scheduler,
             safety_checker=None,
@@ -276,10 +276,10 @@ def test_inference_ddim(self):

     def test_inference_k_lms(self):
         lms_scheduler = LMSDiscreteScheduler.from_pretrained(
-            "Jiali/stable-diffusion-1.5", subfolder="scheduler", revision="onnx"
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
         )
         sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5",
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
             revision="onnx",
             scheduler=lms_scheduler,
             safety_checker=None,
@@ -327,7 +327,7 @@ def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
         test_callback_fn.has_been_called = False

         pipe = OnnxStableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5",
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
             revision="onnx",
             safety_checker=None,
             feature_extractor=None,
@@ -352,7 +352,7 @@ def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
     def test_stable_diffusion_no_safety_checker(self):
         pipe = OnnxStableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5",
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
             revision="onnx",
             safety_checker=None,
             feature_extractor=None,
diff --git a/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py b/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py
index 086354a2a649..c73ed0f6afe8 100644
--- a/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py
+++ b/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py
@@ -210,10 +210,10 @@ def test_inference_k_lms(self):
         )
         init_image = init_image.resize((768, 512))
         lms_scheduler = LMSDiscreteScheduler.from_pretrained(
-            "Jiali/stable-diffusion-1.5", subfolder="scheduler", revision="onnx"
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
         )
         pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5",
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
             revision="onnx",
             scheduler=lms_scheduler,
             safety_checker=None,
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py
index 5bb13fac9b78..f37d598c8387 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py
@@ -1332,7 +1332,7 @@ def tearDown(self):

     def test_download_from_hub(self):
         ckpt_paths = [
-            "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors",
+            "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors",
             "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors",
         ]

@@ -1346,8 +1346,10 @@ def test_download_from_hub(self):
         assert image_out.shape == (512, 512, 3)

     def test_download_local(self):
-        ckpt_filename = hf_hub_download("Jiali/stable-diffusion-1.5", filename="v1-5-pruned-emaonly.safetensors")
-        config_filename = hf_hub_download("Jiali/stable-diffusion-1.5", filename="v1-inference.yaml")
+        ckpt_filename = hf_hub_download(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", filename="v1-5-pruned-emaonly.safetensors"
+        )
+        config_filename = hf_hub_download("stable-diffusion-v1-5/stable-diffusion-v1-5", filename="v1-inference.yaml")

         pipe = StableDiffusionPipeline.from_single_file(
             ckpt_filename, config_files={"v1": config_filename}, torch_dtype=torch.float16
@@ -1402,7 +1404,9 @@ def test_stable_diffusion_1_4_pndm(self):
         assert max_diff < 1e-3

     def test_stable_diffusion_1_5_pndm(self):
-        sd_pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5").to(torch_device)
+        sd_pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5").to(
+            torch_device
+        )
         sd_pipe.set_progress_bar_config(disable=None)

         inputs = self.get_inputs(torch_device)
@@ -1483,9 +1487,9 @@ def get_inputs(self, generator_device="cpu", seed=0):
         return inputs

     def get_pipeline_output_without_device_map(self):
-        sd_pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16).to(
-            torch_device
-        )
+        sd_pipe = StableDiffusionPipeline.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
+        ).to(torch_device)
         sd_pipe.set_progress_bar_config(disable=True)
         inputs = self.get_inputs()
         no_device_map_image = sd_pipe(**inputs).images
@@ -1498,7 +1502,7 @@ def test_forward_pass_balanced_device_map(self):
         no_device_map_image = self.get_pipeline_output_without_device_map()

         sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16
         )
         sd_pipe_with_device_map.set_progress_bar_config(disable=True)
         inputs = self.get_inputs()
@@ -1509,7 +1513,7 @@ def test_forward_pass_balanced_device_map(self):
     def test_components_put_in_right_devices(self):
         sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16
         )

         assert len(set(sd_pipe_with_device_map.hf_device_map.values())) >= 2
@@ -1518,7 +1522,7 @@ def test_max_memory(self):
         no_device_map_image = self.get_pipeline_output_without_device_map()

         sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5",
+            "stable-diffusion-v1-5/stable-diffusion-v1-5",
             device_map="balanced",
             max_memory={0: "1GB", 1: "1GB"},
             torch_dtype=torch.float16,
@@ -1532,7 +1536,7 @@ def test_max_memory(self):
     def test_reset_device_map(self):
         sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16
         )
         sd_pipe_with_device_map.reset_device_map()

@@ -1544,7 +1548,7 @@ def test_reset_device_map(self):
     def test_reset_device_map_to(self):
         sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16
         )
         sd_pipe_with_device_map.reset_device_map()

@@ -1556,7 +1560,7 @@ def test_reset_device_map_to(self):
     def test_reset_device_map_enable_model_cpu_offload(self):
         sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16
         )
         sd_pipe_with_device_map.reset_device_map()

@@ -1568,7 +1572,7 @@ def test_reset_device_map_enable_model_cpu_offload(self):
     def test_reset_device_map_enable_sequential_cpu_offload(self):
         sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained(
-            "Jiali/stable-diffusion-1.5", device_map="balanced", torch_dtype=torch.float16
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16
         )
         sd_pipe_with_device_map.reset_device_map()

diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
index e6de84781f26..7ba0bb5a4a5d 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
@@ -566,7 +566,7 @@ def test_stable_diffusion_pipeline_with_model_offloading(self):
             assert module.device == torch.device("cpu")

     def test_img2img_2nd_order(self):
-        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
+        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
         sd_pipe.scheduler = HeunDiscreteScheduler.from_config(sd_pipe.scheduler.config)
         sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)
@@ -630,7 +630,7 @@ def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3

     def test_img2img_safety_checker_works(self):
-        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
+        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
         sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)

@@ -686,7 +686,7 @@ def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0
         return inputs

     def test_img2img_pndm(self):
-        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
+        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
         sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)

@@ -701,7 +701,7 @@ def test_img2img_pndm(self):
         assert max_diff < 1e-3

     def test_img2img_ddim(self):
-        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
+        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
         sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
         sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)
@@ -717,7 +717,7 @@ def test_img2img_ddim(self):
         assert max_diff < 1e-3

     def test_img2img_lms(self):
-        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
+        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
         sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
         sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)
@@ -733,7 +733,7 @@ def test_img2img_lms(self):
         assert max_diff < 1e-3

     def test_img2img_dpm(self):
-        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
+        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
         sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config)
         sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
index 28da97be9362..ff04ea2cfc5d 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
@@ -767,7 +767,9 @@ def test_stable_diffusion_inpaint_strength_test(self):
         assert np.abs(expected_slice - image_slice).max() < 1e-3

     def test_stable_diffusion_simple_inpaint_ddim(self):
-        pipe = StableDiffusionInpaintPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None)
+        pipe = StableDiffusionInpaintPipeline.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
+        )
         pipe.unet.set_default_attn_processor()
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -973,7 +975,9 @@ def test_stable_diffusion_inpaint_strength_test(self):

     def test_stable_diffusion_simple_inpaint_ddim(self):
         vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
-        pipe = StableDiffusionInpaintPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None)
+        pipe = StableDiffusionInpaintPipeline.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
+        )
         pipe.vae = vae
         pipe.unet.set_default_attn_processor()
         pipe.to(torch_device)
diff --git a/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py b/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py
index 8e0841f064e0..2a1e691e9e8f 100644
--- a/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py
+++ b/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py
@@ -609,7 +609,7 @@ def tearDown(self):
     def test_stable_diffusion_adapter_depth_sd_v15(self):
         adapter_model = "TencentARC/t2iadapter_depth_sd15v2"
-        sd_model = "Jiali/stable-diffusion-1.5"
+        sd_model = "stable-diffusion-v1-5/stable-diffusion-v1-5"
         prompt = "desk"
         image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/desk_depth.png"
         input_channels = 3
@@ -636,7 +636,7 @@ def test_stable_diffusion_adapter_depth_sd_v15(self):
     def test_stable_diffusion_adapter_zoedepth_sd_v15(self):
         adapter_model = "TencentARC/t2iadapter_zoedepth_sd15v1"
-        sd_model = "Jiali/stable-diffusion-1.5"
+        sd_model = "stable-diffusion-v1-5/stable-diffusion-v1-5"
         prompt = "motorcycle"
         image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motorcycle.png"
         input_channels = 3
@@ -660,7 +660,7 @@ def test_stable_diffusion_adapter_zoedepth_sd_v15(self):
     def test_stable_diffusion_adapter_canny_sd_v15(self):
         adapter_model = "TencentARC/t2iadapter_canny_sd15v2"
-        sd_model = "Jiali/stable-diffusion-1.5"
+        sd_model = "stable-diffusion-v1-5/stable-diffusion-v1-5"
         prompt = "toy"
         image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png"
         input_channels = 1
@@ -688,7 +688,7 @@ def test_stable_diffusion_adapter_canny_sd_v15(self):
     def test_stable_diffusion_adapter_sketch_sd15(self):
         adapter_model = "TencentARC/t2iadapter_sketch_sd15v2"
-        sd_model = "Jiali/stable-diffusion-1.5"
+        sd_model = "stable-diffusion-v1-5/stable-diffusion-v1-5"
         prompt = "cat"
         image_url = (
             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/edge.png"
diff --git a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
index cfaaa0914d17..ccb20a1c218e 100644
--- a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
+++ b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
@@ -277,7 +277,9 @@ def tearDown(self):
         torch.cuda.empty_cache()

     def test_harm_safe_stable_diffusion(self):
-        sd_pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None)
+        sd_pipe = StableDiffusionPipeline.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
+        )
         sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
         sd_pipe = sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)
@@ -338,7 +340,9 @@ def test_harm_safe_stable_diffusion(self):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

     def test_nudity_safe_stable_diffusion(self):
-        sd_pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None)
+        sd_pipe = StableDiffusionPipeline.from_pretrained(
+            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
+        )
         sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
         sd_pipe = sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)
@@ -392,7 +396,7 @@ def test_nudity_safe_stable_diffusion(self):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

     def test_nudity_safetychecker_safe_stable_diffusion(self):
-        sd_pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
+        sd_pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
         sd_pipe = sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)

diff --git a/tests/pipelines/test_pipelines_auto.py b/tests/pipelines/test_pipelines_auto.py
index b899cf240d52..561a9011c6ae 100644
--- a/tests/pipelines/test_pipelines_auto.py
+++ b/tests/pipelines/test_pipelines_auto.py
@@ -40,7 +40,7 @@

 PRETRAINED_MODEL_REPO_MAPPING = OrderedDict(
     [
-        ("stable-diffusion", "Jiali/stable-diffusion-1.5"),
+        ("stable-diffusion", "stable-diffusion-v1-5/stable-diffusion-v1-5"),
         ("if", "DeepFloyd/IF-I-XL-v1.0"),
         ("kandinsky", "kandinsky-community/kandinsky-2-1"),
         ("kandinsky22", "kandinsky-community/kandinsky-2-2-decoder"),
@@ -539,7 +539,7 @@ def test_from_pipe_consistent(self):
     def test_controlnet(self):
         # test from_pretrained
-        model_repo = "Jiali/stable-diffusion-1.5"
+        model_repo = "stable-diffusion-v1-5/stable-diffusion-v1-5"
         controlnet_repo = "lllyasviel/sd-controlnet-canny"

         controlnet = ControlNetModel.from_pretrained(controlnet_repo, torch_dtype=torch.float16)
diff --git a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero.py b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero.py
index 9038e3b0100f..f1bf6ee52206 100644
--- a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero.py
+++ b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero.py
@@ -40,7 +40,7 @@ def tearDown(self):
         torch.cuda.empty_cache()

     def test_full_model(self):
-        model_id = "Jiali/stable-diffusion-1.5"
+        model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
         pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
         generator = torch.Generator(device="cuda").manual_seed(0)
diff --git a/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py b/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py
index 5127b9e745d8..332bcfbe03b6 100644
--- a/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py
+++ b/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py
@@ -30,11 +30,13 @@
 @require_torch_gpu
 class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
     pipeline_class = StableDiffusionControlNetPipeline
-    ckpt_path = "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors"
+    ckpt_path = (
+        "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
+    )
     original_config = (
         "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
     )
-    repo_id = "Jiali/stable-diffusion-1.5"
+    repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

     def setUp(self):
         super().setUp()
diff --git a/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py b/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py
index 9d6576078a9c..c0d70123b286 100644
--- a/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py
+++ b/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py
@@ -31,7 +31,7 @@ class StableDiffusionControlNetInpaintPipelineSingleFileSlowTests(unittest.TestC
     pipeline_class = StableDiffusionControlNetInpaintPipeline
     ckpt_path = "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt"
     original_config = "https://raw.githubusercontent.com/runwayml/stable-diffusion/main/configs/stable-diffusion/v1-inpainting-inference.yaml"
-    repo_id = "botp/stable-diffusion-v1-5-inpainting"
+    repo_id = "stable-diffusion-v1-5/stable-diffusion-inpainting"

     def setUp(self):
         super().setUp()
diff --git a/tests/single_file/test_stable_diffusion_controlnet_single_file.py b/tests/single_file/test_stable_diffusion_controlnet_single_file.py
index 13d64dab77a1..3b5cf910b080 100644
--- a/tests/single_file/test_stable_diffusion_controlnet_single_file.py
+++ b/tests/single_file/test_stable_diffusion_controlnet_single_file.py
@@ -29,11 +29,13 @@
 @require_torch_gpu
 class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
     pipeline_class = StableDiffusionControlNetPipeline
-    ckpt_path = "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors"
+    ckpt_path = (
+        "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
+    )
     original_config = (
         "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
     )
-    repo_id = "Jiali/stable-diffusion-1.5"
+    repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

     def setUp(self):
         super().setUp()
diff --git a/tests/single_file/test_stable_diffusion_img2img_single_file.py b/tests/single_file/test_stable_diffusion_img2img_single_file.py
index fd99c4dede2f..04f36f255014 100644
--- a/tests/single_file/test_stable_diffusion_img2img_single_file.py
+++ b/tests/single_file/test_stable_diffusion_img2img_single_file.py
@@ -23,11 +23,13 @@
 @require_torch_gpu
 class StableDiffusionImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
     pipeline_class = StableDiffusionImg2ImgPipeline
-    ckpt_path = "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors"
+    ckpt_path = (
+        "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
+    )
     original_config = (
         "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
     )
-    repo_id = "Jiali/stable-diffusion-1.5"
+    repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

     def setUp(self):
         super().setUp()
diff --git a/tests/single_file/test_stable_diffusion_inpaint_single_file.py b/tests/single_file/test_stable_diffusion_inpaint_single_file.py
index ba9583639b98..5c6734a9a33e 100644
--- a/tests/single_file/test_stable_diffusion_inpaint_single_file.py
+++ b/tests/single_file/test_stable_diffusion_inpaint_single_file.py
@@ -63,7 +63,7 @@ def test_single_file_format_inference_is_same_as_pretrained(self):

     def test_single_file_loading_4_channel_unet(self):
         # Test loading single file inpaint with a 4 channel UNet
-        ckpt_path = "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors"
+        ckpt_path = "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
         pipe = self.pipeline_class.from_single_file(ckpt_path)

         assert pipe.unet.config.in_channels == 4
diff --git a/tests/single_file/test_stable_diffusion_single_file.py b/tests/single_file/test_stable_diffusion_single_file.py
index d40af28b2407..e46e87e18c18 100644
--- a/tests/single_file/test_stable_diffusion_single_file.py
+++ b/tests/single_file/test_stable_diffusion_single_file.py
@@ -26,11 +26,13 @@
 @require_torch_gpu
 class StableDiffusionPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
     pipeline_class = StableDiffusionPipeline
-    ckpt_path = "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors"
+    ckpt_path = (
+        "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
+    )
     original_config = (
         "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
     )
-    repo_id = "Jiali/stable-diffusion-1.5"
+    repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

     def setUp(self):
         super().setUp()
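Note on the `fetch_diffusers_config` hunk at the top of the diff: adding `copy.deepcopy` means callers get their own dictionary instead of a reference into the module-level `DIFFUSERS_DEFAULT_PIPELINE_PATHS` table, so mutating the returned config can no longer corrupt the shared defaults for later calls. The sketch below is illustrative only, not the actual diffusers code; the `DEFAULTS` mapping and helper names are invented for the example, and it simply demonstrates the aliasing hazard that the copy appears intended to guard against.

    import copy

    # Stand-in for a module-level default table such as DIFFUSERS_DEFAULT_PIPELINE_PATHS.
    DEFAULTS = {"v1": {"pretrained_model_name_or_path": "stable-diffusion-v1-5/stable-diffusion-v1-5"}}

    def fetch_config_shared(model_type):
        # Returns a reference to the shared dict; caller mutations leak back into DEFAULTS.
        return DEFAULTS[model_type]

    def fetch_config_copied(model_type):
        # Returns an independent copy, mirroring the deepcopy added in the patch.
        return copy.deepcopy(DEFAULTS[model_type])

    cfg = fetch_config_shared("v1")
    cfg["pretrained_model_name_or_path"] = "some/other-repo"
    print(DEFAULTS["v1"]["pretrained_model_name_or_path"])  # prints "some/other-repo": the shared default was mutated

    DEFAULTS["v1"]["pretrained_model_name_or_path"] = "stable-diffusion-v1-5/stable-diffusion-v1-5"  # restore for the demo
    cfg = fetch_config_copied("v1")
    cfg["pretrained_model_name_or_path"] = "some/other-repo"
    print(DEFAULTS["v1"]["pretrained_model_name_or_path"])  # unchanged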