
Commit a3befb5

fix copied from statement

1 parent 1cb8ec9

File tree: 2 files changed, +14 -14 lines

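For context on what this commit fixes: a `# Copied from <dotted path>` comment is the marker that diffusers' copy-consistency tooling (`make fix-copies`) uses to locate the canonical implementation and keep the duplicate in sync, so the dotted path must resolve all the way down to the owning class. The sketch below is a hypothetical re-implementation of that resolution, not the repository's actual checker; resolve_target and copy_is_in_sync are invented names.

# Hypothetical sketch of resolving a "# Copied from <dotted.path>" target;
# not diffusers' actual check_copies implementation.
import importlib
import inspect


def resolve_target(dotted_path: str):
    """Resolve 'pkg.module.Class.method' to the referenced object."""
    parts = dotted_path.split(".")
    # Import the longest prefix that is a module, then walk attributes.
    for i in range(len(parts), 0, -1):
        try:
            obj = importlib.import_module(".".join(parts[:i]))
        except ImportError:
            continue
        for attr in parts[i:]:
            # Without the class segment (the bug fixed here), the method
            # is looked up on the module itself and getattr raises.
            obj = getattr(obj, attr)
        return obj
    raise ImportError(f"cannot resolve {dotted_path!r}")


def copy_is_in_sync(copy_fn, dotted_path: str) -> bool:
    """True if the copy's source text matches the annotated original's."""
    return inspect.getsource(copy_fn) == inspect.getsource(resolve_target(dotted_path))

With the fixed comments, resolve_target("diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._extract_masked_hidden") walks module, then class, then method; the pre-fix path omits QwenImagePipeline, so the lookup lands on the module and fails with AttributeError.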

src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py

Lines changed: 7 additions & 7 deletions
@@ -182,7 +182,7 @@ def __init__(
         self.prompt_template_encode_start_idx = 34
         self.default_sample_size = 128
 
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage._extract_masked_hidden
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._extract_masked_hidden
     def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor):
         bool_mask = mask.bool()
         valid_lengths = bool_mask.sum(dim=1)
@@ -191,7 +191,7 @@ def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor
 
         return split_result
 
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage._get_qwen_prompt_embeds
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._get_qwen_prompt_embeds
     def _get_qwen_prompt_embeds(
         self,
         prompt: Union[str, List[str]] = None,
@@ -265,7 +265,7 @@ def get_timesteps(self, num_inference_steps, strength, device):
 
         return timesteps, num_inference_steps - t_start
 
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.encode_prompt
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline.encode_prompt
     def encode_prompt(
         self,
         prompt: Union[str, List[str]],
@@ -304,7 +304,7 @@ def encode_prompt(
 
         return prompt_embeds, prompt_embeds_mask
 
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.check_inputs
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline.check_inputs
     def check_inputs(
         self,
         prompt,
@@ -365,7 +365,7 @@ def check_inputs(
             raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}")
 
     @staticmethod
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage._prepare_latent_image_ids
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._prepare_latent_image_ids
     def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
         latent_image_ids = torch.zeros(height, width, 3)
         latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None]
@@ -380,7 +380,7 @@ def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
         return latent_image_ids.to(device=device, dtype=dtype)
 
     @staticmethod
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage._pack_latents
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._pack_latents
     def _pack_latents(latents, batch_size, num_channels_latents, height, width):
         latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
         latents = latents.permute(0, 2, 4, 1, 3, 5)
@@ -389,7 +389,7 @@ def _pack_latents(latents, batch_size, num_channels_latents, height, width):
         return latents
 
     @staticmethod
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage._unpack_latents
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._unpack_latents
     def _unpack_latents(latents, height, width, vae_scale_factor):
         batch_size, num_patches, channels = latents.shape
 
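The last three hunks in this file re-annotate the Flux-style latent packing helpers, whose bodies appear only partially in the diff. The following self-contained sketch reconstructs the 2x2 patchify round trip those fragments imply; the trailing reshape in pack_latents and the whole inverse are reconstructions, so details may differ from the library's exact code.

import torch


def pack_latents(latents: torch.Tensor) -> torch.Tensor:
    """(B, C, H, W) -> (B, (H//2)*(W//2), C*4): each 2x2 pixel block
    becomes one sequence token with 4*C channels."""
    b, c, h, w = latents.shape
    latents = latents.view(b, c, h // 2, 2, w // 2, 2)
    latents = latents.permute(0, 2, 4, 1, 3, 5)  # (B, H//2, W//2, C, 2, 2)
    return latents.reshape(b, (h // 2) * (w // 2), c * 4)


def unpack_latents(latents: torch.Tensor, height: int, width: int) -> torch.Tensor:
    """Inverse of pack_latents: (B, (H//2)*(W//2), C*4) -> (B, C, H, W)."""
    b, _, packed_c = latents.shape
    c = packed_c // 4
    latents = latents.view(b, height // 2, width // 2, c, 2, 2)
    latents = latents.permute(0, 3, 1, 4, 2, 5)  # (B, C, H//2, 2, W//2, 2)
    return latents.reshape(b, c, height, width)


x = torch.randn(1, 16, 64, 64)
assert torch.equal(unpack_latents(pack_latents(x), 64, 64), x)

Packing turns the spatial latent grid into the token sequence the transformer consumes; unpacking inverts it exactly, which the assert verifies.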
src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py

Lines changed: 7 additions & 7 deletions
@@ -192,7 +192,7 @@ def __init__(
         self.prompt_template_encode_start_idx = 34
         self.default_sample_size = 128
 
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage._extract_masked_hidden
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._extract_masked_hidden
     def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor):
         bool_mask = mask.bool()
         valid_lengths = bool_mask.sum(dim=1)
@@ -201,7 +201,7 @@ def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor
 
         return split_result
 
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage._get_qwen_prompt_embeds
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._get_qwen_prompt_embeds
     def _get_qwen_prompt_embeds(
         self,
         prompt: Union[str, List[str]] = None,
@@ -240,7 +240,7 @@ def _get_qwen_prompt_embeds(
 
         return prompt_embeds, encoder_attention_mask
 
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_img2img._encode_vae_image
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_img2img.QwenImageImg2ImgPipeline._encode_vae_image
     def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
         if isinstance(generator, list):
             image_latents = [
@@ -276,7 +276,7 @@ def get_timesteps(self, num_inference_steps, strength, device):
 
         return timesteps, num_inference_steps - t_start
 
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.encode_prompt
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline.encode_prompt
     def encode_prompt(
         self,
         prompt: Union[str, List[str]],
@@ -315,7 +315,7 @@ def encode_prompt(
 
         return prompt_embeds, prompt_embeds_mask
 
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.check_inputs
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline.check_inputs
     def check_inputs(
         self,
         prompt,
@@ -392,7 +392,7 @@ def check_inputs(
             raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}")
 
     @staticmethod
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage._prepare_latent_image_ids
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._prepare_latent_image_ids
     def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
         latent_image_ids = torch.zeros(height, width, 3)
         latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None]
@@ -416,7 +416,7 @@ def _pack_latents(latents, batch_size, num_channels_latents, height, width):
         return latents
 
     @staticmethod
-    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage._unpack_latents
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._unpack_latents
     def _unpack_latents(latents, height, width, vae_scale_factor):
         batch_size, num_patches, channels = latents.shape
 

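Both files also re-annotate _prepare_latent_image_ids, of which only the first two statements and the return are visible in the hunks above. Here is a sketch that completes the helper by analogy with those visible lines; the column-index assignment and the flattening reshape are reconstructions and may differ from the library's exact code.

import torch


def prepare_latent_image_ids(height: int, width: int, device, dtype) -> torch.Tensor:
    """Build one (row, col) positional id per packed latent patch.

    Channel 0 stays zero; channels 1 and 2 hold each patch's row and
    column index. The grid is flattened so every packed token gets one
    id row, matching the (height * width) sequence length.
    """
    ids = torch.zeros(height, width, 3)
    ids[..., 1] = ids[..., 1] + torch.arange(height)[:, None]  # row index
    ids[..., 2] = ids[..., 2] + torch.arange(width)[None, :]   # column index
    return ids.reshape(height * width, 3).to(device=device, dtype=dtype)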