Commit 25e0656

runwayml take-down: make change to sd-legacy
1 parent e92eabf commit 25e0656
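
Context for this change: the original `runwayml/stable-diffusion-v1-5` repository was taken down from the Hugging Face Hub, so every hard-coded reference below is repointed at the mirrored `stable-diffusion-v1-5/stable-diffusion-v1-5` repo. A minimal sketch of loading against the new ID (standard diffusers API; the prompt, dtype, and output path are illustrative):

import torch
from diffusers import StableDiffusionPipeline

# The mirrored repo ID that this commit switches to.
model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
image = pipe("a photo of an astronaut riding a horse").images[0]
image.save("astronaut.png")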

File tree

5 files changed (+10, -10 lines)

examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
Lines changed: 1 addition & 1 deletion

@@ -160,7 +160,7 @@ def save_model_card(
 from diffusers import AutoPipelineForText2Image
 import torch
 {diffusers_imports_pivotal}
-pipeline = AutoPipelineForText2Image.from_pretrained('runwayml/stable-diffusion-v1-5', torch_dtype=torch.float16).to('cuda')
+pipeline = AutoPipelineForText2Image.from_pretrained('stable-diffusion-v1-5/stable-diffusion-v1-5', torch_dtype=torch.float16).to('cuda')
 pipeline.load_lora_weights('{repo_id}', weight_name='pytorch_lora_weights.safetensors')
 {diffusers_example_pivotal}
 image = pipeline('{validation_prompt if validation_prompt else instance_prompt}').images[0]

scripts/convert_blipdiffusion_to_diffusers.py
Lines changed: 4 additions & 4 deletions

@@ -303,10 +303,10 @@ def save_blip_diffusion_model(model, args):
     qformer = get_qformer(model)
     qformer.eval()

-    text_encoder = ContextCLIPTextModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder")
-    vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")
+    text_encoder = ContextCLIPTextModel.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="text_encoder")
+    vae = AutoencoderKL.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="vae")

-    unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
+    unet = UNet2DConditionModel.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet")
     vae.eval()
     text_encoder.eval()
     scheduler = PNDMScheduler(
@@ -316,7 +316,7 @@ def save_blip_diffusion_model(model, args):
         set_alpha_to_one=False,
         skip_prk_steps=True,
     )
-    tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="tokenizer")
+    tokenizer = CLIPTokenizer.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="tokenizer")
     image_processor = BlipImageProcessor()
     blip_diffusion = BlipDiffusionPipeline(
         tokenizer=tokenizer,
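
Downstream of this converter, the assembled pipeline is consumed through the usual diffusers loader; a minimal usage sketch (the `Salesforce/blipdiffusion` checkpoint name is assumed here for illustration, the rest is standard API):

import torch
from diffusers.pipelines import BlipDiffusionPipeline

# Load a converted BLIP-Diffusion checkpoint from the Hub.
# "Salesforce/blipdiffusion" is assumed to be the published conversion.
pipe = BlipDiffusionPipeline.from_pretrained("Salesforce/blipdiffusion", torch_dtype=torch.float16).to("cuda")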

src/diffusers/loaders/single_file.py
Lines changed: 1 addition & 1 deletion

@@ -329,7 +329,7 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):

 >>> # Enable float16 and move to GPU
 >>> pipeline = StableDiffusionPipeline.from_single_file(
-...     "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
+...     "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
 ...     torch_dtype=torch.float16,
 ... )
 >>> pipeline.to("cuda")

src/diffusers/loaders/textual_inversion.py
Lines changed: 3 additions & 3 deletions

@@ -333,7 +333,7 @@ def load_textual_inversion(
 from diffusers import StableDiffusionPipeline
 import torch

-model_id = "runwayml/stable-diffusion-v1-5"
+model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

 pipe.load_textual_inversion("sd-concepts-library/cat-toy")
@@ -352,7 +352,7 @@ def load_textual_inversion(
 from diffusers import StableDiffusionPipeline
 import torch

-model_id = "runwayml/stable-diffusion-v1-5"
+model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

 pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")
@@ -469,7 +469,7 @@ def unload_textual_inversion(
 from diffusers import AutoPipelineForText2Image
 import torch

-pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5")
+pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")

 # Example 1
 pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork")

src/diffusers/models/autoencoders/consistency_decoder_vae.py
Lines changed: 1 addition & 1 deletion

@@ -60,7 +60,7 @@ class ConsistencyDecoderVAE(ModelMixin, ConfigMixin):

 >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
 >>> pipe = StableDiffusionPipeline.from_pretrained(
-...     "runwayml/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
+...     "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
 ... ).to("cuda")

 >>> image = pipe("horse", generator=torch.manual_seed(0)).images[0]
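
All five files apply the same one-for-one string substitution, so downstream code that still references the old repo ID can migrate mechanically. A hypothetical helper (the recursive glob and in-place rewrite are illustrative, not part of this commit):

from pathlib import Path

# Old and new Hub repo IDs, taken from this diff.
OLD_ID = "runwayml/stable-diffusion-v1-5"
NEW_ID = "stable-diffusion-v1-5/stable-diffusion-v1-5"

# Rewrite every Python file under the current directory that still
# references the taken-down repo ID.
for path in Path(".").rglob("*.py"):
    text = path.read_text(encoding="utf-8")
    if OLD_ID in text:
        path.write_text(text.replace(OLD_ID, NEW_ID), encoding="utf-8")
        print(f"updated {path}")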
