@@ -38,16 +38,21 @@ The following example demonstrates how to use a *MotionAdapter* checkpoint with
 
 ```python
 import torch
-from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
+from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
 # Load the motion adapter
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
+adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 # load SD 1.5 based finetuned model
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
+pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
 scheduler = DDIMScheduler.from_pretrained(
-    model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
+    model_id,
+    subfolder="scheduler",
+    clip_sample=False,
+    timestep_spacing="linspace",
+    beta_schedule="linear",
+    steps_offset=1,
 )
 pipe.scheduler = scheduler
 
@@ -70,6 +75,7 @@ output = pipe(
 )
 frames = output.frames[0]
 export_to_gif(frames, "animation.gif")
+
 ```
 
 Here are some sample outputs:
@@ -88,7 +94,7 @@ Here are some sample outputs:
 
 <Tip>
 
-AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples.
+AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples. Additionally, the AnimateDiff checkpoints can be sensitive to the beta schedule of the scheduler. We recommend setting this to `linear`.
 
 </Tip>
 
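For quick reference, the scheduler settings recommended in the tip above can also be applied on their own. The following is a minimal sketch (not part of the diff) that reuses the `model_id` from the examples in this document; `from_pretrained` loads the checkpoint's scheduler config and overrides only the keyword arguments passed in:

```python
from diffusers import DDIMScheduler

# Same SD 1.5 based checkpoint as in the examples above.
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"

scheduler = DDIMScheduler.from_pretrained(
    model_id,
    subfolder="scheduler",
    clip_sample=False,            # sample clipping can degrade AnimateDiff outputs
    beta_schedule="linear",       # AnimateDiff checkpoints can be sensitive to this
    timestep_spacing="linspace",
    steps_offset=1,
)
```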
@@ -98,18 +104,25 @@ Motion LoRAs are a collection of LoRAs that work with the `guoyww/animatediff-mo
 
 ```python
 import torch
-from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
+from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
 # Load the motion adapter
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
+adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 # load SD 1.5 based finetuned model
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
-pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
+pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
+pipe.load_lora_weights(
+    "guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out"
+)
 
 scheduler = DDIMScheduler.from_pretrained(
-    model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
+    model_id,
+    subfolder="scheduler",
+    clip_sample=False,
+    beta_schedule="linear",
+    timestep_spacing="linspace",
+    steps_offset=1,
 )
 pipe.scheduler = scheduler
 
@@ -132,6 +145,7 @@ output = pipe(
 )
 frames = output.frames[0]
 export_to_gif(frames, "animation.gif")
+
 ```
 
 <table>
@@ -160,21 +174,30 @@ Then you can use the following code to combine Motion LoRAs.
 
 ```python
 import torch
-from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
+from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
 from diffusers.utils import export_to_gif
 
 # Load the motion adapter
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
+adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 # load SD 1.5 based finetuned model
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
+pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
 
-pipe.load_lora_weights("diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
-pipe.load_lora_weights("diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left")
+pipe.load_lora_weights(
+    "diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out",
+)
+pipe.load_lora_weights(
+    "diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left",
+)
 pipe.set_adapters(["zoom-out", "pan-left"], adapter_weights=[1.0, 1.0])
 
 scheduler = DDIMScheduler.from_pretrained(
-    model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
+    model_id,
+    subfolder="scheduler",
+    clip_sample=False,
+    timestep_spacing="linspace",
+    beta_schedule="linear",
+    steps_offset=1,
 )
 pipe.scheduler = scheduler
 
@@ -197,6 +220,7 @@ output = pipe(
 )
 frames = output.frames[0]
 export_to_gif(frames, "animation.gif")
+
 ```
 
 <table>