49 | 49 | """ |
50 | 50 |
|
51 | 51 |
|
52 | | -# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid |
53 | | -def get_resize_crop_region_for_grid(src, tgt_width, tgt_height): |
54 | | -    tw = tgt_width |
55 | | -    th = tgt_height |
56 | | -    h, w = src |
57 | | -    r = h / w |
58 | | -    if r > (th / tw): |
59 | | -        resize_height = th |
60 | | -        resize_width = int(round(th / h * w)) |
61 | | -    else: |
62 | | -        resize_width = tw |
63 | | -        resize_height = int(round(tw / w * h)) |
64 | | - |
65 | | -    crop_top = int(round((th - resize_height) / 2.0)) |
66 | | -    crop_left = int(round((tw - resize_width) / 2.0)) |
67 | | - |
68 | | -    return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) |
69 | | - |
70 | | - |
71 | 52 | # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps |
72 | 53 | def retrieve_timesteps( |
73 | 54 |     scheduler, |
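
For reference, here is the deleted helper restated outside diff form, with descriptive comments and an example call added (the example values are illustrative, not from the PR): it fits a source `(height, width)` into a target grid while preserving aspect ratio and returns the centered crop region.

```python
# Standalone restatement of the helper removed in this hunk; the logic is copied
# verbatim from the deleted lines, only the comments and the sample call are new.
def get_resize_crop_region_for_grid(src, tgt_width, tgt_height):
    tw = tgt_width
    th = tgt_height
    h, w = src
    r = h / w
    if r > (th / tw):
        # Source is relatively taller than the target: height is the limiting side.
        resize_height = th
        resize_width = int(round(th / h * w))
    else:
        # Source is relatively wider than the target: width is the limiting side.
        resize_width = tw
        resize_height = int(round(tw / w * h))

    crop_top = int(round((th - resize_height) / 2.0))
    crop_left = int(round((tw - resize_width) / 2.0))

    # Returns (top, left), (bottom, right) of the centered crop region.
    return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)


# A 30x45 source fitted into a 48x48 grid: width is the limiting side, so the
# region is vertically centered.
print(get_resize_crop_region_for_grid((30, 45), 48, 48))  # ((8, 0), (40, 48))
```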
@@ -179,6 +160,7 @@ def __init__( |
179 | 160 |
|
180 | 161 |         self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) |
181 | 162 |
|
| 163 | +    # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline._get_t5_prompt_embeds |
182 | 164 |     def _get_t5_prompt_embeds( |
183 | 165 |         self, |
184 | 166 |         prompt: Union[str, List[str]] = None, |
@@ -278,8 +260,10 @@ def encode_prompt( |
278 | 260 |                 dtype=dtype, |
279 | 261 |             ) |
280 | 262 |
|
| 263 | +        if do_classifier_free_guidance and negative_prompt is None: |
| 264 | +            negative_prompt_embeds = prompt_embeds.new_zeros(prompt_embeds.shape) |
| 265 | + |
281 | 266 |         if do_classifier_free_guidance and negative_prompt_embeds is None: |
282 | | -            negative_prompt = negative_prompt or "" |
283 | 267 |             negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt |
284 | 268 |
|
285 | 269 |             if prompt is not None and type(prompt) is not type(negative_prompt): |
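
The net effect of this hunk: with classifier-free guidance enabled and no negative prompt supplied, the pipeline now falls back to all-zero negative embeddings rather than encoding an empty string. A minimal sketch of that decision path, assuming `prompt_embeds` is the already-computed positive embedding tensor and using a hypothetical `encode_fn` in place of the pipeline's own encoding helper:

```python
# Minimal sketch (not the pipeline's actual code path) of the negative-prompt
# handling after this change. `encode_fn` is a hypothetical stand-in for the
# pipeline's T5 encoding helper; the other names mirror the diff.
import torch


def resolve_negative_prompt_embeds(prompt_embeds, negative_prompt, do_classifier_free_guidance, encode_fn):
    if not do_classifier_free_guidance:
        # No guidance: no negative embeddings are needed.
        return None
    if negative_prompt is None:
        # New behaviour: all-zero embeddings with the same shape, dtype and
        # device as the positive embeddings, instead of encoding "".
        return prompt_embeds.new_zeros(prompt_embeds.shape)
    # Unchanged path: an explicitly supplied negative prompt is still encoded.
    return encode_fn(negative_prompt)


# Dummy tensor standing in for real T5 embeddings (shape chosen arbitrarily).
prompt_embeds = torch.randn(2, 16, 32)
neg = resolve_negative_prompt_embeds(prompt_embeds, None, True, encode_fn=lambda p: None)
assert neg.shape == prompt_embeds.shape and torch.count_nonzero(neg) == 0
```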
|