Commit 56d1073

add copied from

1 parent 58d403b commit 56d1073

File tree: 1 file changed (+7, −1 lines)

src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py

Lines changed: 7 additions & 1 deletion
@@ -100,7 +100,7 @@
         ```
 """
 
-
+# Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift
 def calculate_shift(
     image_seq_len,
     base_seq_len: int = 256,
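These `# Copied from ...` markers are diffusers' code-duplication convention: `make fix-copies` uses them to keep each copy in sync with the referenced definition. For context, `calculate_shift` computes the flow-matching timestep shift ("mu") from the image token count. A minimal sketch; every default besides `base_seq_len=256` is assumed from the analogous Flux helper, not read from this diff:

```python
# Sketch only: defaults other than base_seq_len=256 are assumptions borrowed
# from diffusers' analogous Flux helper.
def calculate_shift(
    image_seq_len: int,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.15,
) -> float:
    # Linear interpolation: short token sequences get base_shift,
    # long ones approach max_shift.
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    return image_seq_len * m + b
```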
@@ -238,6 +238,7 @@ def __init__(
         self.prompt_template_encode_start_idx = 34
         self.default_sample_size = 128
 
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.extract_masked_hidden
     def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor):
         bool_mask = mask.bool()
         valid_lengths = bool_mask.sum(dim=1)
@@ -246,6 +247,7 @@ def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor
 
         return split_result
 
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.get_qwen_prompt_embeds
     def _get_qwen_prompt_embeds(
         self,
         prompt: Union[str, List[str]] = None,
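The two `_extract_masked_hidden` hunks show the helper's opening and closing lines. A reconstruction consistent with those visible lines; the `torch.split` middle step is an assumption suggested by the `split_result` name:

```python
import torch

def _extract_masked_hidden(hidden_states: torch.Tensor, mask: torch.Tensor):
    # hidden_states: (batch, seq_len, dim); mask: (batch, seq_len) of 0/1.
    bool_mask = mask.bool()
    valid_lengths = bool_mask.sum(dim=1)
    # Gather the unmasked token embeddings into one flat (n_valid, dim) tensor,
    # then split it back into one variable-length tensor per batch item.
    selected = hidden_states[bool_mask]
    split_result = torch.split(selected, valid_lengths.tolist(), dim=0)
    return split_result
```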
@@ -284,6 +286,7 @@ def _get_qwen_prompt_embeds(
 
         return prompt_embeds, encoder_attention_mask
 
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.encode_prompt
     def encode_prompt(
         self,
         prompt: Union[str, List[str]],
@@ -378,6 +381,7 @@ def check_inputs(
             raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}")
 
     @staticmethod
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._pack_latents
     def _pack_latents(latents, batch_size, num_channels_latents, height, width):
         latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
         latents = latents.permute(0, 2, 4, 1, 3, 5)
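The `_pack_latents` hunk shows the first two tensor ops of the 2x2 patchification. A sketch of the whole step; the closing reshape is assumed from the packing layout these pipelines share:

```python
import torch

def _pack_latents(latents, batch_size, num_channels_latents, height, width):
    # Carve the (B, C, H, W) latent grid into 2x2 patches.
    latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
    latents = latents.permute(0, 2, 4, 1, 3, 5)
    # Assumed final step: flatten each patch into one token of 4*C channels,
    # giving (B, H/2 * W/2, 4*C).
    return latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
```

For a (1, 16, 64, 64) latent this yields a (1, 1024, 64) token sequence.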
@@ -386,6 +390,7 @@ def _pack_latents(latents, batch_size, num_channels_latents, height, width):
         return latents
 
     @staticmethod
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._unpack_latents
     def _unpack_latents(latents, height, width, vae_scale_factor):
         batch_size, num_patches, channels = latents.shape
 
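`_unpack_latents` inverts that packing. A sketch assuming the same conventions as the Flux original, where the `vae_scale_factor` arithmetic maps the requested pixel size back to an even latent grid:

```python
import torch

def _unpack_latents(latents, height, width, vae_scale_factor):
    batch_size, num_patches, channels = latents.shape
    # Convert the requested pixel size back to the even latent grid size
    # (arithmetic assumed from the Flux original).
    height = 2 * (int(height) // (vae_scale_factor * 2))
    width = 2 * (int(width) // (vae_scale_factor * 2))
    # Undo the 2x2 patchification: (B, tokens, 4*C) -> (B, C, H, W).
    latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2)
    latents = latents.permute(0, 3, 1, 4, 2, 5)
    return latents.reshape(batch_size, channels // 4, height, width)
```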
@@ -430,6 +435,7 @@ def disable_vae_tiling(self):
         """
         self.vae.disable_tiling()
 
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline.prepare_latents
     def prepare_latents(
         self,
         batch_size,
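The final marker covers `prepare_latents`. A generic sketch of how this step usually looks in diffusers' Flux-style pipelines (sample noise on the latent grid, then pack it into tokens); the Qwen-specific version may differ in its exact shapes and return values:

```python
import torch
from diffusers.utils.torch_utils import randn_tensor  # real diffusers helper

def prepare_latents(batch_size, num_channels_latents, height, width,
                    dtype, device, generator, vae_scale_factor, latents=None):
    # Pixel size -> even latent grid size (Flux-style arithmetic, assumed here).
    height = 2 * (int(height) // (vae_scale_factor * 2))
    width = 2 * (int(width) // (vae_scale_factor * 2))
    if latents is not None:
        return latents.to(device=device, dtype=dtype)
    shape = (batch_size, num_channels_latents, height, width)
    noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
    # Pack into the (batch, tokens, channels) layout the transformer consumes,
    # reusing the _pack_latents sketch above.
    return _pack_latents(noise, batch_size, num_channels_latents, height, width)
```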
