
Commit 43c2cd5: fix copies

Parent: defac21
File tree: 4 files changed (+24, -0 lines)


src/diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py (6 additions, 0 deletions)

@@ -247,6 +247,9 @@ def _get_t5_prompt_embeds(
         prompt = [prompt] if isinstance(prompt, str) else prompt
         batch_size = len(prompt)

+        if isinstance(self, TextualInversionLoaderMixin):
+            prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2)
+
         text_inputs = self.tokenizer_2(
             prompt,
             padding="max_length",
@@ -291,6 +294,9 @@ def _get_clip_prompt_embeds(
         prompt = [prompt] if isinstance(prompt, str) else prompt
         batch_size = len(prompt)

+        if isinstance(self, TextualInversionLoaderMixin):
+            prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
+
         text_inputs = self.tokenizer(
             prompt,
             padding="max_length",
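For context, `maybe_convert_prompt` comes from `TextualInversionLoaderMixin` and rewrites prompts containing multi-vector textual-inversion placeholders so that every learned vector gets its own token before tokenization. The following is a minimal, simplified sketch of that kind of expansion, not the library's implementation, assuming the loaded embedding registered tokens named `<token>`, `<token>_1`, `<token>_2`, ... in the tokenizer's added vocabulary:

```python
def expand_multi_vector_tokens(prompt: str, tokenizer) -> str:
    """Illustrative sketch: expand multi-vector placeholder tokens in a prompt."""
    vocab = tokenizer.get_added_vocab()  # tokens registered when the embedding was loaded
    for token in list(vocab):
        # Only expand base placeholders that have numbered siblings (<token>_1, ...).
        if token in prompt and f"{token}_1" in vocab:
            expansion = token
            i = 1
            while f"{token}_{i}" in vocab:
                expansion += f" {token}_{i}"
                i += 1
            prompt = prompt.replace(token, expansion)
    return prompt

# e.g. "a photo of <cat-toy>" -> "a photo of <cat-toy> <cat-toy>_1"
# when the embedding contributed two vectors.
```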

src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py (6 additions, 0 deletions)

@@ -257,6 +257,9 @@ def _get_t5_prompt_embeds(
         prompt = [prompt] if isinstance(prompt, str) else prompt
         batch_size = len(prompt)

+        if isinstance(self, TextualInversionLoaderMixin):
+            prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2)
+
         text_inputs = self.tokenizer_2(
             prompt,
             padding="max_length",
@@ -301,6 +304,9 @@ def _get_clip_prompt_embeds(
         prompt = [prompt] if isinstance(prompt, str) else prompt
         batch_size = len(prompt)

+        if isinstance(self, TextualInversionLoaderMixin):
+            prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
+
         text_inputs = self.tokenizer(
             prompt,
             padding="max_length",

src/diffusers/pipelines/flux/pipeline_flux_img2img.py (6 additions, 0 deletions)

@@ -235,6 +235,9 @@ def _get_t5_prompt_embeds(
         prompt = [prompt] if isinstance(prompt, str) else prompt
         batch_size = len(prompt)

+        if isinstance(self, TextualInversionLoaderMixin):
+            prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2)
+
         text_inputs = self.tokenizer_2(
             prompt,
             padding="max_length",
@@ -279,6 +282,9 @@ def _get_clip_prompt_embeds(
         prompt = [prompt] if isinstance(prompt, str) else prompt
         batch_size = len(prompt)

+        if isinstance(self, TextualInversionLoaderMixin):
+            prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
+
         text_inputs = self.tokenizer(
             prompt,
             padding="max_length",

src/diffusers/pipelines/flux/pipeline_flux_inpaint.py (6 additions, 0 deletions)

@@ -239,6 +239,9 @@ def _get_t5_prompt_embeds(
         prompt = [prompt] if isinstance(prompt, str) else prompt
         batch_size = len(prompt)

+        if isinstance(self, TextualInversionLoaderMixin):
+            prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2)
+
         text_inputs = self.tokenizer_2(
             prompt,
             padding="max_length",
@@ -283,6 +286,9 @@ def _get_clip_prompt_embeds(
         prompt = [prompt] if isinstance(prompt, str) else prompt
         batch_size = len(prompt)

+        if isinstance(self, TextualInversionLoaderMixin):
+            prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
+
         text_inputs = self.tokenizer(
             prompt,
             padding="max_length",
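With these guards in place, prompts passed to the Flux img2img, inpainting, and ControlNet variants can carry textual-inversion placeholder tokens, which get expanded before both the CLIP and T5 tokenizers run. A hedged usage sketch follows; the embedding file and prompt token are illustrative, and it assumes the pipeline mixes in `TextualInversionLoaderMixin` so `load_textual_inversion` is available:

```python
import torch
from diffusers import FluxImg2ImgPipeline
from diffusers.utils import load_image

pipe = FluxImg2ImgPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# Register a learned placeholder token (illustrative file name). The new
# isinstance(self, TextualInversionLoaderMixin) checks in
# _get_t5_prompt_embeds / _get_clip_prompt_embeds expand it via
# maybe_convert_prompt before tokenization.
pipe.load_textual_inversion("my_concept.safetensors", token="<my-concept>")

init_image = load_image("https://example.com/input.png")
image = pipe(
    prompt="a photo of <my-concept> in a forest",
    image=init_image,
    strength=0.7,
).images[0]
image.save("out.png")
```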
