Skip to content

Commit 6bc1dd5

Browse files
committed
rename do_classifier_free_guidance->prepare_unconditional_embeds
1 parent 53ebfa1 commit 6bc1dd5

File tree

2 files changed

+18
-19
lines changed

2 files changed

+18
-19
lines changed

src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_modular.py

Lines changed: 17 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -233,10 +233,10 @@ def encode_image(self, components, image, device, num_images_per_prompt, output_
233233

234234
# modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
235235
def prepare_ip_adapter_image_embeds(
236-
self, components, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
236+
self, components, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, prepare_unconditional_embeds
237237
):
238238
image_embeds = []
239-
if do_classifier_free_guidance:
239+
if prepare_unconditional_embeds:
240240
negative_image_embeds = []
241241
if ip_adapter_image_embeds is None:
242242
if not isinstance(ip_adapter_image, list):
@@ -256,19 +256,19 @@ def prepare_ip_adapter_image_embeds(
256256
)
257257

258258
image_embeds.append(single_image_embeds[None, :])
259-
if do_classifier_free_guidance:
259+
if prepare_unconditional_embeds:
260260
negative_image_embeds.append(single_negative_image_embeds[None, :])
261261
else:
262262
for single_image_embeds in ip_adapter_image_embeds:
263-
if do_classifier_free_guidance:
263+
if prepare_unconditional_embeds:
264264
single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
265265
negative_image_embeds.append(single_negative_image_embeds)
266266
image_embeds.append(single_image_embeds)
267267

268268
ip_adapter_image_embeds = []
269269
for i, single_image_embeds in enumerate(image_embeds):
270270
single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
271-
if do_classifier_free_guidance:
271+
if prepare_unconditional_embeds:
272272
single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
273273
single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)
274274

@@ -281,7 +281,7 @@ def prepare_ip_adapter_image_embeds(
281281
def __call__(self, pipeline, state: PipelineState) -> PipelineState:
282282
data = self.get_block_state(state)
283283

284-
data.do_classifier_free_guidance = pipeline.guider.num_conditions > 1
284+
data.prepare_unconditional_embeds = pipeline.guider.num_conditions > 1
285285
data.device = pipeline._execution_device
286286

287287
data.ip_adapter_embeds = self.prepare_ip_adapter_image_embeds(
@@ -290,9 +290,9 @@ def __call__(self, pipeline, state: PipelineState) -> PipelineState:
290290
ip_adapter_image_embeds=None,
291291
device=data.device,
292292
num_images_per_prompt=1,
293-
do_classifier_free_guidance=data.do_classifier_free_guidance,
293+
prepare_unconditional_embeds=data.prepare_unconditional_embeds,
294294
)
295-
if data.do_classifier_free_guidance:
295+
if data.prepare_unconditional_embeds:
296296
data.negative_ip_adapter_embeds = []
297297
for i, image_embeds in enumerate(data.ip_adapter_embeds):
298298
negative_image_embeds, image_embeds = image_embeds.chunk(2)
@@ -355,15 +355,14 @@ def check_inputs(self, pipeline, data):
355355
elif data.prompt_2 is not None and (not isinstance(data.prompt_2, str) and not isinstance(data.prompt_2, list)):
356356
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(data.prompt_2)}")
357357

358-
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt with self -> components
359358
def encode_prompt(
360359
self,
361360
components,
362361
prompt: str,
363362
prompt_2: Optional[str] = None,
364363
device: Optional[torch.device] = None,
365364
num_images_per_prompt: int = 1,
366-
do_classifier_free_guidance: bool = True,
365+
prepare_unconditional_embeds: bool = True,
367366
negative_prompt: Optional[str] = None,
368367
negative_prompt_2: Optional[str] = None,
369368
prompt_embeds: Optional[torch.Tensor] = None,
@@ -386,8 +385,8 @@ def encode_prompt(
386385
torch device
387386
num_images_per_prompt (`int`):
388387
number of images that should be generated per prompt
389-
do_classifier_free_guidance (`bool`):
390-
whether to use classifier free guidance or not
388+
prepare_unconditional_embeds (`bool`):
389+
whether to prepare unconditional embeddings or not
391390
negative_prompt (`str` or `List[str]`, *optional*):
392391
The prompt or prompts not to guide the image generation. If not defined, one has to pass
393392
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
@@ -495,10 +494,10 @@ def encode_prompt(
495494

496495
# get unconditional embeddings for classifier free guidance
497496
zero_out_negative_prompt = negative_prompt is None and components.config.force_zeros_for_empty_prompt
498-
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
497+
if prepare_unconditional_embeds and negative_prompt_embeds is None and zero_out_negative_prompt:
499498
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
500499
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
501-
elif do_classifier_free_guidance and negative_prompt_embeds is None:
500+
elif prepare_unconditional_embeds and negative_prompt_embeds is None:
502501
negative_prompt = negative_prompt or ""
503502
negative_prompt_2 = negative_prompt_2 or negative_prompt
504503

@@ -559,7 +558,7 @@ def encode_prompt(
559558
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
560559
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
561560

562-
if do_classifier_free_guidance:
561+
if prepare_unconditional_embeds:
563562
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
564563
seq_len = negative_prompt_embeds.shape[1]
565564

@@ -574,7 +573,7 @@ def encode_prompt(
574573
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
575574
bs_embed * num_images_per_prompt, -1
576575
)
577-
if do_classifier_free_guidance:
576+
if prepare_unconditional_embeds:
578577
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
579578
bs_embed * num_images_per_prompt, -1
580579
)
@@ -598,7 +597,7 @@ def __call__(self, pipeline, state: PipelineState) -> PipelineState:
598597
data = self.get_block_state(state)
599598
self.check_inputs(pipeline, data)
600599

601-
data.do_classifier_free_guidance = pipeline.guider.num_conditions > 1
600+
data.prepare_unconditional_embeds = pipeline.guider.num_conditions > 1
602601
data.device = pipeline._execution_device
603602

604603
# Encode input prompt
@@ -616,7 +615,7 @@ def __call__(self, pipeline, state: PipelineState) -> PipelineState:
616615
data.prompt_2,
617616
data.device,
618617
1,
619-
data.do_classifier_free_guidance,
618+
data.prepare_unconditional_embeds,
620619
data.negative_prompt,
621620
data.negative_prompt_2,
622621
prompt_embeds=None,

src/diffusers/schedulers/scheduling_euler_discrete.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -669,7 +669,7 @@ def step(
669669
dt = self.sigmas[self.step_index + 1] - sigma_hat
670670

671671
prev_sample = sample + derivative * dt
672-
672+
673673
# Cast sample back to model compatible dtype
674674
prev_sample = prev_sample.to(model_output.dtype)
675675

0 commit comments

Comments
 (0)