Skip to content

Commit 98a2b3d

Browse files
authored
Update AnimateDiff docs (#6341)
* update * update * update
1 parent 2026ec0 commit 98a2b3d

File tree

1 file changed

+40
-16
lines changed

1 file changed

+40
-16
lines changed

docs/source/en/api/pipelines/animatediff.md

Lines changed: 40 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -38,16 +38,21 @@ The following example demonstrates how to use a *MotionAdapter* checkpoint with
3838

3939
```python
4040
import torch
41-
from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
41+
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
4242
from diffusers.utils import export_to_gif
4343

4444
# Load the motion adapter
45-
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
45+
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
4646
# load SD 1.5 based finetuned model
4747
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
48-
pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
48+
pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
4949
scheduler = DDIMScheduler.from_pretrained(
50-
model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
50+
model_id,
51+
subfolder="scheduler",
52+
clip_sample=False,
53+
timestep_spacing="linspace",
54+
beta_schedule="linear",
55+
steps_offset=1,
5156
)
5257
pipe.scheduler = scheduler
5358

@@ -70,6 +75,7 @@ output = pipe(
7075
)
7176
frames = output.frames[0]
7277
export_to_gif(frames, "animation.gif")
78+
7379
```
7480

7581
Here are some sample outputs:
@@ -88,7 +94,7 @@ Here are some sample outputs:
8894

8995
<Tip>
9096

91-
AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples.
97+
AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples. Additionally, the AnimateDiff checkpoints can be sensitive to the beta schedule of the scheduler. We recommend setting this to `linear`.
9298

9399
</Tip>
94100

@@ -98,18 +104,25 @@ Motion LoRAs are a collection of LoRAs that work with the `guoyww/animatediff-mo
98104

99105
```python
100106
import torch
101-
from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
107+
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
102108
from diffusers.utils import export_to_gif
103109

104110
# Load the motion adapter
105-
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
111+
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
106112
# load SD 1.5 based finetuned model
107113
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
108-
pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
109-
pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
114+
pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
115+
pipe.load_lora_weights(
116+
"guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out"
117+
)
110118

111119
scheduler = DDIMScheduler.from_pretrained(
112-
model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
120+
model_id,
121+
subfolder="scheduler",
122+
clip_sample=False,
123+
beta_schedule="linear",
124+
timestep_spacing="linspace",
125+
steps_offset=1,
113126
)
114127
pipe.scheduler = scheduler
115128

@@ -132,6 +145,7 @@ output = pipe(
132145
)
133146
frames = output.frames[0]
134147
export_to_gif(frames, "animation.gif")
148+
135149
```
136150

137151
<table>
@@ -160,21 +174,30 @@ Then you can use the following code to combine Motion LoRAs.
160174

161175
```python
162176
import torch
163-
from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
177+
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
164178
from diffusers.utils import export_to_gif
165179

166180
# Load the motion adapter
167-
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
181+
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
168182
# load SD 1.5 based finetuned model
169183
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
170-
pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
184+
pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
171185

172-
pipe.load_lora_weights("diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
173-
pipe.load_lora_weights("diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left")
186+
pipe.load_lora_weights(
187+
"diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out",
188+
)
189+
pipe.load_lora_weights(
190+
"diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left",
191+
)
174192
pipe.set_adapters(["zoom-out", "pan-left"], adapter_weights=[1.0, 1.0])
175193

176194
scheduler = DDIMScheduler.from_pretrained(
177-
model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
195+
model_id,
196+
subfolder="scheduler",
197+
clip_sample=False,
198+
timestep_spacing="linspace",
199+
beta_schedule="linear",
200+
steps_offset=1,
178201
)
179202
pipe.scheduler = scheduler
180203

@@ -197,6 +220,7 @@ output = pipe(
197220
)
198221
frames = output.frames[0]
199222
export_to_gif(frames, "animation.gif")
223+
200224
```
201225

202226
<table>

0 commit comments

Comments
 (0)