1 change: 0 additions & 1 deletion src/transformers/models/blip/processing_blip.py
@@ -60,7 +60,6 @@ class BlipProcessor(ProcessorMixin):
def __init__(self, image_processor, tokenizer, **kwargs):
tokenizer.return_token_type_ids = False
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor

def __call__(
self,
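The hunk above drops the `self.current_processor = self.image_processor` alias that BLIP (and several processors below) kept around for the old target-processor workflow. A minimal migration sketch, assuming the public `Salesforce/blip-image-captioning-base` checkpoint and a sample COCO image (both illustrative, not part of this PR): code that reached for `processor.current_processor` can address `processor.image_processor` directly, or simply call the combined processor.

```python
import requests
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # illustrative sample image
image = Image.open(requests.get(url, stream=True).raw)

# Before this change, callers could rely on the mutable alias:
#     pixel_inputs = processor.current_processor(images=image, return_tensors="pt")
# After the removal, address the sub-processor explicitly ...
pixel_inputs = processor.image_processor(images=image, return_tensors="pt")
# ... or let the combined processor handle both modalities in one call.
inputs = processor(images=image, text="a photo of", return_tensors="pt")
```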
1 change: 0 additions & 1 deletion src/transformers/models/blip_2/processing_blip_2.py
@@ -66,7 +66,6 @@ class Blip2Processor(ProcessorMixin):

def __init__(self, image_processor, tokenizer, num_query_tokens=None, **kwargs):
tokenizer.return_token_type_ids = False
self.current_processor = image_processor
if not hasattr(tokenizer, "image_token"):
self.image_token = AddedToken("<image>", normalized=False, special=True)
tokenizer.add_tokens([self.image_token], special_tokens=True)
22 changes: 0 additions & 22 deletions src/transformers/models/chinese_clip/processing_chinese_clip.py
@@ -16,8 +16,6 @@
Image/Text processor class for Chinese-CLIP
"""

import warnings

from ...processing_utils import ProcessorMixin


@@ -41,27 +39,7 @@ class ChineseCLIPProcessor(ProcessorMixin):
tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",
FutureWarning,
)
feature_extractor = kwargs.pop("feature_extractor")

image_processor = image_processor if image_processor is not None else feature_extractor

super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor

@property
def feature_extractor_class(self):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
FutureWarning,
)
return self.image_processor_class


__all__ = ["ChineseCLIPProcessor"]
29 changes: 0 additions & 29 deletions src/transformers/models/clip/processing_clip.py
@@ -16,8 +16,6 @@
Image/Text processor class for CLIP
"""

import warnings

from ...processing_utils import ProcessorMixin


@@ -40,34 +38,7 @@ class CLIPProcessor(ProcessorMixin):
tokenizer_class = "AutoTokenizer"

def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",
FutureWarning,
)
feature_extractor = kwargs.pop("feature_extractor")

image_processor = image_processor if image_processor is not None else feature_extractor

super().__init__(image_processor, tokenizer)

@property
def feature_extractor_class(self):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
FutureWarning,
)
return self.image_processor_class

@property
def feature_extractor(self):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
FutureWarning,
)
return self.image_processor


__all__ = ["CLIPProcessor"]
29 changes: 0 additions & 29 deletions src/transformers/models/clipseg/processing_clipseg.py
@@ -16,8 +16,6 @@
Image/Text processor class for CLIPSeg
"""

import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding

@@ -41,17 +39,6 @@ class CLIPSegProcessor(ProcessorMixin):
tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",
FutureWarning,
)
feature_extractor = kwargs.pop("feature_extractor")

image_processor = image_processor if image_processor is not None else feature_extractor

super().__init__(image_processor, tokenizer)

def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
@@ -124,21 +111,5 @@ def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
else:
return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

@property
def feature_extractor_class(self):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
FutureWarning,
)
return self.image_processor_class

@property
def feature_extractor(self):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
FutureWarning,
)
return self.image_processor


__all__ = ["CLIPSegProcessor"]
50 changes: 0 additions & 50 deletions src/transformers/models/donut/processing_donut.py
@@ -17,8 +17,6 @@
"""

import re
import warnings
from contextlib import contextmanager
from typing import Optional, Union

from ...image_utils import ImageInput
@@ -55,20 +53,7 @@ class DonutProcessor(ProcessorMixin):
tokenizer_class = "AutoTokenizer"

def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",
FutureWarning,
)
feature_extractor = kwargs.pop("feature_extractor")

image_processor = image_processor if image_processor is not None else feature_extractor

super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
self._in_target_context_manager = False

def __call__(
self,
@@ -82,9 +67,6 @@ def __call__(
[`~DonutProcessor.as_target_processor`] this method forwards all its arguments to DonutTokenizer's
[`~DonutTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
"""
if self._in_target_context_manager:
return self.current_processor(images, text, **kwargs)

if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process.")

@@ -116,22 +98,6 @@ def model_input_names(self):

return list(image_processor_input_names + ["input_ids", "labels"])

@contextmanager
def as_target_processor(self):
"""
Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR.
"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call."
)
self._in_target_context_manager = True
self.current_processor = self.tokenizer
yield
self.current_processor = self.image_processor
self._in_target_context_manager = False

def token2json(self, tokens, is_inner_value=False, added_vocab=None):
"""
Convert a (generated) token sequence into an ordered JSON format.
@@ -190,21 +156,5 @@ def token2json(self, tokens, is_inner_value=False, added_vocab=None):
else:
return [] if is_inner_value else {"text_sequence": tokens}

@property
def feature_extractor_class(self):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
FutureWarning,
)
return self.image_processor_class

@property
def feature_extractor(self):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
FutureWarning,
)
return self.image_processor


__all__ = ["DonutProcessor"]
29 changes: 0 additions & 29 deletions src/transformers/models/flava/processing_flava.py
@@ -16,8 +16,6 @@
Image/Text processor class for FLAVA
"""

import warnings

from ...processing_utils import ProcessorMixin


@@ -38,34 +36,7 @@ class FlavaProcessor(ProcessorMixin):
tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",
FutureWarning,
)
feature_extractor = kwargs.pop("feature_extractor")

image_processor = image_processor if image_processor is not None else feature_extractor
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor

@property
def feature_extractor_class(self):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
FutureWarning,
)
return self.image_processor_class

@property
def feature_extractor(self):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
FutureWarning,
)
return self.image_processor


__all__ = ["FlavaProcessor"]
1 change: 0 additions & 1 deletion src/transformers/models/fuyu/processing_fuyu.py
@@ -531,7 +531,6 @@ def __call__(

if text is not None and images is None:
logger.warning("You are processing a text with no associated image. Make sure it is intended.")
self.current_processor = self.tokenizer
text_encoding = self.tokenizer(text, **output_kwargs["text_kwargs"])
return text_encoding

1 change: 0 additions & 1 deletion src/transformers/models/git/processing_git.py
@@ -39,7 +39,6 @@ class GitProcessor(ProcessorMixin):

def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor


__all__ = ["GitProcessor"]
1 change: 0 additions & 1 deletion src/transformers/models/idefics/processing_idefics.py
@@ -159,7 +159,6 @@ class IdeficsProcessor(ProcessorMixin):

def __init__(self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs):
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
self.image_token_id = (
tokenizer.image_token_id
if hasattr(tokenizer, "image_token")
28 changes: 0 additions & 28 deletions src/transformers/models/layoutlmv2/processing_layoutlmv2.py
@@ -16,7 +16,6 @@
Processor class for LayoutLMv2.
"""

import warnings
from typing import Optional, Union

from ...processing_utils import ProcessorMixin
@@ -49,17 +48,6 @@ class LayoutLMv2Processor(ProcessorMixin):
tokenizer_class = ("LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast")

def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",
FutureWarning,
)
feature_extractor = kwargs.pop("feature_extractor")

image_processor = image_processor if image_processor is not None else feature_extractor

super().__init__(image_processor, tokenizer)

def __call__(
@@ -166,21 +154,5 @@ def get_overflowing_images(self, images, overflow_to_sample_mapping):
def model_input_names(self):
return ["input_ids", "bbox", "token_type_ids", "attention_mask", "image"]

@property
def feature_extractor_class(self):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
FutureWarning,
)
return self.image_processor_class

@property
def feature_extractor(self):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
FutureWarning,
)
return self.image_processor


__all__ = ["LayoutLMv2Processor"]
28 changes: 0 additions & 28 deletions src/transformers/models/layoutlmv3/processing_layoutlmv3.py
@@ -16,7 +16,6 @@
Processor class for LayoutLMv3.
"""

import warnings
from typing import Optional, Union

from ...processing_utils import ProcessorMixin
@@ -49,17 +48,6 @@ class LayoutLMv3Processor(ProcessorMixin):
tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",
FutureWarning,
)
feature_extractor = kwargs.pop("feature_extractor")

image_processor = image_processor if image_processor is not None else feature_extractor

super().__init__(image_processor, tokenizer)

def __call__(
@@ -164,21 +152,5 @@ def get_overflowing_images(self, images, overflow_to_sample_mapping):
def model_input_names(self):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]

@property
def feature_extractor_class(self):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
FutureWarning,
)
return self.image_processor_class

@property
def feature_extractor(self):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
FutureWarning,
)
return self.image_processor


__all__ = ["LayoutLMv3Processor"]