
Commit 0c2f1cc

[Import] Don't force transformers to be installed (#5035)
* [Import] Don't force transformers to be installed
* make style
1 parent 47f2d2c commit 0c2f1cc

File tree

1 file changed: +5 -5 lines changed


src/diffusers/loaders.py

Lines changed: 5 additions & 5 deletions
@@ -41,7 +41,7 @@
 
 
 if is_transformers_available():
-    from transformers import CLIPTextModel, CLIPTextModelWithProjection, PreTrainedModel, PreTrainedTokenizer
+    from transformers import CLIPTextModel, CLIPTextModelWithProjection
 
 if is_accelerate_available():
     from accelerate import init_empty_weights
@@ -627,7 +627,7 @@ class TextualInversionLoaderMixin:
     Load textual inversion tokens and embeddings to the tokenizer and text encoder.
     """
 
-    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):
+    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):  # noqa: F821
         r"""
         Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to
         be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
@@ -654,7 +654,7 @@ def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTra
 
         return prompts
 
-    def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"):
+    def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"):  # noqa: F821
         r"""
         Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
         to a multi-vector textual inversion embedding, this function will process the prompt so that the special token
@@ -688,8 +688,8 @@ def load_textual_inversion(
         self,
         pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]],
         token: Optional[Union[str, List[str]]] = None,
-        tokenizer: Optional[PreTrainedTokenizer] = None,
-        text_encoder: Optional[PreTrainedModel] = None,
+        tokenizer: Optional["PreTrainedTokenizer"] = None,  # noqa: F821
+        text_encoder: Optional["PreTrainedModel"] = None,  # noqa: F821
         **kwargs,
     ):
         r"""
