@@ -26,8 +26,8 @@ The abstract of the paper is the following:
 
 ### Available checkpoints:
 
-- *Text-to-Image (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-base-0.9](https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9) with [`StableDiffusionXLPipeline`]
-- *Image-to-Image / Refiner (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-refiner-0.9](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-0.9) with [`StableDiffusionXLImg2ImgPipeline`]
+- *Text-to-Image (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) with [`StableDiffusionXLPipeline`]
+- *Image-to-Image / Refiner (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-refiner-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0) with [`StableDiffusionXLImg2ImgPipeline`]
 
 ## Usage Example
 
@@ -50,7 +50,7 @@ from diffusers import StableDiffusionXLPipeline
 import torch
 
 pipe = StableDiffusionXLPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
 )
 pipe.to("cuda")
 
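The snippet in the hunk above stops after the pipeline is moved to the GPU. For orientation, a minimal sketch of the text-to-image call that follows in the full document might look like this (the prompt string and output filename are arbitrary examples, not taken from the diff):

```python
# Arbitrary example prompt; any text prompt works here.
prompt = "An astronaut riding a green horse"

# Generate one 1024x1024 image with the base SDXL pipeline and save it.
image = pipe(prompt=prompt).images[0]
image.save("sdxl_text2img.png")
```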
@@ -68,7 +68,7 @@ from diffusers import StableDiffusionXLImg2ImgPipeline
 from diffusers.utils import load_image
 
 pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-refiner-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
 )
 pipe = pipe.to("cuda")
 url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"
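The hunk is cut off right after `url` is defined. A sketch of how that snippet presumably continues (the prompt is an illustrative placeholder) could be:

```python
# Download the initial image referenced by `url` above.
init_image = load_image(url).convert("RGB")

# Illustrative placeholder prompt describing the desired refined image.
prompt = "a photo of an astronaut riding a horse on mars"

# Standard image-to-image call: the refiner re-denoises the input image.
image = pipe(prompt, image=init_image).images[0]
image.save("sdxl_img2img.png")
```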
@@ -88,7 +88,7 @@ from diffusers import StableDiffusionXLInpaintPipeline
 from diffusers.utils import load_image
 
 pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
 )
 pipe.to("cuda")
 
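The inpainting call shown in the next hunk header needs an initial image and a mask. A sketch of that setup, with placeholder URLs standing in for whatever the full document actually uses, might be:

```python
# Placeholder URLs: substitute any RGB image and a matching black/white mask.
img_url = "https://example.com/inpainting/input.png"
mask_url = "https://example.com/inpainting/mask.png"

init_image = load_image(img_url).convert("RGB")
mask_image = load_image(mask_url).convert("RGB")

# Illustrative prompt; white mask pixels are repainted, black pixels are kept.
prompt = "A majestic tiger sitting on a bench"
image = pipe(
    prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80
).images[0]
```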
@@ -104,8 +104,8 @@ image = pipe(prompt=prompt, image=init_image, mask_image=mask_image, num_inferen
 
 ### Refining the image output
 
-In addition to the [base model checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9),
-StableDiffusion-XL also includes a [refiner checkpoint](huggingface.co/stabilityai/stable-diffusion-xl-refiner-0.9)
+In addition to the [base model checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0),
+StableDiffusion-XL also includes a [refiner checkpoint](huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0)
 that is specialized in denoising low-noise stage images to generate images of improved high-frequency quality.
 This refiner checkpoint can be used as a "second-step" pipeline after having run the base checkpoint to improve
 image quality.
@@ -149,12 +149,12 @@ from diffusers import DiffusionPipeline
 import torch
 
 base = DiffusionPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
 )
 base.to("cuda")
 
 refiner = DiffusionPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-refiner-0.9",
+    "stabilityai/stable-diffusion-xl-refiner-1.0",
     text_encoder_2=base.text_encoder_2,
     vae=base.vae,
     torch_dtype=torch.float16,
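This hunk ends inside the refiner constructor. For the ensemble-of-experts workflow the surrounding text describes, the handoff between `base` and `refiner` would look roughly like the sketch below, assuming the refiner has also been moved to the GPU; the prompt, step count, and 0.8 split are illustrative values:

```python
# Illustrative settings for the base/refiner split.
prompt = "A majestic lion jumping from a big stone at night"
n_steps = 40
high_noise_frac = 0.8  # fraction of the denoising schedule handled by the base model

# The base model stops early and returns latents instead of a decoded image.
image = base(
    prompt=prompt,
    num_inference_steps=n_steps,
    denoising_end=high_noise_frac,
    output_type="latent",
).images

# The refiner resumes the same schedule where the base left off.
image = refiner(
    prompt=prompt,
    num_inference_steps=n_steps,
    denoising_start=high_noise_frac,
    image=image,
).images[0]
```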
@@ -219,7 +219,7 @@ The ensemble-of-experts method works well on all available schedulers!
 #### 2.) Refining the image output from fully denoised base image
 
 In standard [`StableDiffusionImg2ImgPipeline`]-fashion, the fully-denoised image generated by the base model
-can be further improved using the [refiner checkpoint](huggingface.co/stabilityai/stable-diffusion-xl-refiner-0.9).
+can be further improved using the [refiner checkpoint](huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0).
 
 For this, you simply run the refiner as a normal image-to-image pipeline after the "base" text-to-image
 pipeline. You can leave the outputs of the base model in latent space.
@@ -229,12 +229,12 @@ from diffusers import DiffusionPipeline
 import torch
 
 pipe = DiffusionPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
 )
 pipe.to("cuda")
 
 refiner = DiffusionPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-refiner-0.9",
+    "stabilityai/stable-diffusion-xl-refiner-1.0",
     text_encoder_2=pipe.text_encoder_2,
     vae=pipe.vae,
     torch_dtype=torch.float16,
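Picking up where this hunk is cut off, the two-pass approach described above would be completed roughly as follows; the prompt and output filename are illustrative, and the refiner is assumed to have been moved to the GPU:

```python
# Illustrative prompt for the base text-to-image pass.
prompt = "A majestic lion jumping from a big stone at night"

# Keep the base output as latents so the refiner can skip re-encoding through the VAE.
latents = pipe(prompt=prompt, output_type="latent").images

# Run the refiner as an ordinary image-to-image pass over those latents.
image = refiner(prompt=prompt, image=latents).images[0]
image.save("refined.png")
```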
@@ -267,12 +267,12 @@ from diffusers import StableDiffusionXLInpaintPipeline
 from diffusers.utils import load_image
 
 pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
 )
 pipe.to("cuda")
 
 refiner = StableDiffusionXLInpaintPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-refiner-0.9",
+    "stabilityai/stable-diffusion-xl-refiner-1.0",
     text_encoder_2=pipe.text_encoder_2,
     vae=pipe.vae,
     torch_dtype=torch.float16,
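Since both checkpoints are loaded as inpainting pipelines here, the base/refiner split can also be applied to a masked region. The sketch below assumes `StableDiffusionXLInpaintPipeline` accepts the same `denoising_end`/`denoising_start` arguments and that the refiner is on the GPU; the URLs, prompt, and split fraction are placeholders:

```python
# Placeholder URLs: any RGB image plus a black/white mask of the same size.
img_url = "https://example.com/inpainting/input.png"
mask_url = "https://example.com/inpainting/mask.png"

init_image = load_image(img_url).convert("RGB")
mask_image = load_image(mask_url).convert("RGB")

prompt = "A majestic tiger sitting on a bench"  # illustrative prompt
num_inference_steps = 75
high_noise_frac = 0.7  # illustrative split between base and refiner

# Base inpainting pass over the high-noise part of the schedule, kept as latents.
image = pipe(
    prompt=prompt,
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=num_inference_steps,
    denoising_end=high_noise_frac,
    output_type="latent",
).images

# Refiner finishes the low-noise part of the schedule on the same masked region.
image = refiner(
    prompt=prompt,
    image=image,
    mask_image=mask_image,
    num_inference_steps=num_inference_steps,
    denoising_start=high_noise_frac,
).images[0]
```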
@@ -321,12 +321,12 @@ from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipelin
 import torch
 
 pipe = StableDiffusionXLPipeline.from_single_file(
-    "./sd_xl_base_0.9.safetensors", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+    "./sd_xl_base_1.0.safetensors", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
 )
 pipe.to("cuda")
 
 refiner = StableDiffusionXLImg2ImgPipeline.from_single_file(
-    "./sd_xl_refiner_0.9.safetensors", torch_dtype=torch.float16, use_safetensors=True, variant="fp16"
+    "./sd_xl_refiner_1.0.safetensors", torch_dtype=torch.float16, use_safetensors=True, variant="fp16"
 )
 refiner.to("cuda")
 ```
@@ -399,7 +399,7 @@ from diffusers import StableDiffusionXLPipeline
 import torch
 
 pipe = StableDiffusionXLPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
 )
 pipe.to("cuda")
 