diff --git a/autointent/_embedder.py b/autointent/_embedder.py
index be45ced91..81a6b94c4 100644
--- a/autointent/_embedder.py
+++ b/autointent/_embedder.py
@@ -87,7 +87,7 @@ def __hash__(self) -> int:
         hasher = Hasher()
         for parameter in self.embedding_model.parameters():
            hasher.update(parameter.detach().cpu().numpy())
-        hasher.update(self.config.max_length)
+        hasher.update(self.config.tokenizer_config.max_length)
         return hasher.intdigest()

     def clear_ram(self) -> None:
@@ -114,7 +114,7 @@ def dump(self, path: Path) -> None:
             model_name=str(self.config.model_name),
             device=self.config.device,
             batch_size=self.config.batch_size,
-            max_length=self.config.max_length,
+            max_length=self.config.tokenizer_config.max_length,
             use_cache=self.config.use_cache,
         )
         path.mkdir(parents=True, exist_ok=True)
@@ -137,6 +137,10 @@ def load(cls, path: Path | str, override_config: EmbedderConfig | None = None) -
         else:
             kwargs = metadata  # type: ignore[assignment]

+        max_length = kwargs.pop("max_length", None)
+        if max_length is not None:
+            kwargs["tokenizer_config"] = {"max_length": max_length}
+
         return cls(EmbedderConfig(**kwargs))

     def embed(self, utterances: list[str], task_type: TaskTypeEnum | None = None) -> npt.NDArray[np.float32]:
@@ -162,12 +166,12 @@ def embed(self, utterances: list[str], task_type: TaskTypeEnum | None = None) ->
             "Calculating embeddings with model %s, batch_size=%d, max_seq_length=%s, embedder_device=%s",
             self.config.model_name,
             self.config.batch_size,
-            str(self.config.max_length),
+            str(self.config.tokenizer_config.max_length),
             self.config.device,
         )

-        if self.config.max_length is not None:
-            self.embedding_model.max_seq_length = self.config.max_length
+        if self.config.tokenizer_config.max_length is not None:
+            self.embedding_model.max_seq_length = self.config.tokenizer_config.max_length

         embeddings = self.embedding_model.encode(
             utterances,
diff --git a/autointent/_ranker.py b/autointent/_ranker.py
index 798cd1a9a..43774d5dd 100644
--- a/autointent/_ranker.py
+++ b/autointent/_ranker.py
@@ -113,7 +113,7 @@ def __init__(
             self.config.model_name,
             trust_remote_code=True,
             device=self.config.device,
-            max_length=self.config.max_length,  # type: ignore[arg-type]
+            max_length=self.config.tokenizer_config.max_length,  # type: ignore[arg-type]
         )
         self._train_head = False
         self._clf = classifier_head
@@ -252,7 +252,7 @@ def save(self, path: str) -> None:
             model_name=self.config.model_name,
             train_head=self._train_head,
             device=self.config.device,
-            max_length=self.config.max_length,
+            max_length=self.config.tokenizer_config.max_length,
             batch_size=self.config.batch_size,
         )

@@ -282,6 +282,10 @@ def load(cls, path: Path, override_config: CrossEncoderConfig | None = None) ->
         else:
             kwargs = metadata  # type: ignore[assignment]

+        max_length = kwargs.pop("max_length", None)
+        if max_length is not None:
+            kwargs["tokenizer_config"] = {"max_length": max_length}
+
         return cls(
             CrossEncoderConfig(**kwargs),
             classifier_head=clf,
diff --git a/autointent/_vector_index.py b/autointent/_vector_index.py
index 1f16b102d..92a313015 100644
--- a/autointent/_vector_index.py
+++ b/autointent/_vector_index.py
@@ -15,7 +15,7 @@
 import numpy.typing as npt

 from autointent import Embedder
-from autointent.configs import EmbedderConfig, TaskTypeEnum
+from autointent.configs import EmbedderConfig, TaskTypeEnum, TokenizerConfig
 from autointent.custom_types import ListOfLabels


@@ -195,7 +195,7 @@ def dump(self, dir_path: Path) -> None:
             json.dump(data, file, indent=4, ensure_ascii=False)

         metadata = VectorIndexMetadata(
-            embedder_max_length=self.embedder.config.max_length,
+            embedder_max_length=self.embedder.config.tokenizer_config.max_length,
             embedder_model_name=str(self.embedder.config.model_name),
             embedder_device=self.embedder.config.device,
             embedder_batch_size=self.embedder.config.batch_size,
@@ -229,7 +229,7 @@ def load(
                 model_name=metadata["embedder_model_name"],
                 device=embedder_device or metadata["embedder_device"],
                 batch_size=embedder_batch_size or metadata["embedder_batch_size"],
-                max_length=metadata["embedder_max_length"],
+                tokenizer_config=TokenizerConfig(max_length=metadata["embedder_max_length"]),
                 use_cache=embedder_use_cache or metadata["embedder_use_cache"],
             )
         )
diff --git a/autointent/configs/__init__.py b/autointent/configs/__init__.py
index b939a5395..410d3e15f 100644
--- a/autointent/configs/__init__.py
+++ b/autointent/configs/__init__.py
@@ -2,14 +2,16 @@

 from ._inference_node import InferenceNodeConfig
 from ._optimization import DataConfig, LoggingConfig
-from ._transformers import CrossEncoderConfig, EmbedderConfig, TaskTypeEnum
+from ._transformers import CrossEncoderConfig, EmbedderConfig, HFModelConfig, TaskTypeEnum, TokenizerConfig

 __all__ = [
     "CrossEncoderConfig",
     "DataConfig",
     "EmbedderConfig",
+    "HFModelConfig",
     "InferenceNodeConfig",
     "InferenceNodeConfig",
     "LoggingConfig",
     "TaskTypeEnum",
+    "TokenizerConfig",
 ]
diff --git a/autointent/configs/_transformers.py b/autointent/configs/_transformers.py
index 2cbe98efb..bfc88839d 100644
--- a/autointent/configs/_transformers.py
+++ b/autointent/configs/_transformers.py
@@ -1,19 +1,24 @@
 from enum import Enum
-from typing import Any
+from typing import Any, Literal

 from pydantic import BaseModel, ConfigDict, Field, PositiveInt
 from typing_extensions import Self, assert_never


-class ModelConfig(BaseModel):
-    model_config = ConfigDict(extra="forbid")
-    batch_size: PositiveInt = Field(32, description="Batch size for model inference.")
+class TokenizerConfig(BaseModel):
+    padding: bool | Literal["longest", "max_length", "do_not_pad"] = True
+    truncation: bool = True
     max_length: PositiveInt | None = Field(None, description="Maximum length of input sequences.")


-class STModelConfig(ModelConfig):
-    model_name: str
+class HFModelConfig(BaseModel):
+    model_config = ConfigDict(extra="forbid")
+    model_name: str = Field(
+        "prajjwal1/bert-tiny", description="Name of the hugging face repository with transformer model."
+    )
+    batch_size: PositiveInt = Field(32, description="Batch size for model inference.")
     device: str | None = Field(None, description="Torch notation for CPU or CUDA.")
+    tokenizer_config: TokenizerConfig = Field(default_factory=TokenizerConfig)

     @classmethod
     def from_search_config(cls, values: dict[str, Any] | str | BaseModel | None) -> Self:
@@ -26,7 +31,7 @@ def from_search_config(cls, values: dict[str, Any] | str | BaseModel | None) ->
             Model configuration.
         """
         if values is None:
-            return cls()  # type: ignore[call-arg]
+            return cls()
         if isinstance(values, BaseModel):
             return values  # type: ignore[return-value]
         if isinstance(values, str):
@@ -45,7 +50,7 @@ class TaskTypeEnum(Enum):
     sts = "sts"


-class EmbedderConfig(STModelConfig):
+class EmbedderConfig(HFModelConfig):
     model_name: str = Field("sentence-transformers/all-MiniLM-L6-v2", description="Name of the hugging face model.")
     default_prompt: str | None = Field(
         None, description="Default prompt for the model. This is used when no task specific prompt is not provided."
@@ -105,7 +110,7 @@ def get_prompt_type(self, prompt_type: TaskTypeEnum | None) -> str | None:  # no
     use_cache: bool = Field(False, description="Whether to use embeddings caching.")


-class CrossEncoderConfig(STModelConfig):
+class CrossEncoderConfig(HFModelConfig):
     model_name: str = Field("cross-encoder/ms-marco-MiniLM-L-6-v2", description="Name of the hugging face model.")
     train_head: bool = Field(
         False, description="Whether to train the head of the model. If False, LogReg will be trained."
diff --git a/autointent/modules/scoring/_bert.py b/autointent/modules/scoring/_bert.py
index 3b83ca34e..f74bed620 100644
--- a/autointent/modules/scoring/_bert.py
+++ b/autointent/modules/scoring/_bert.py
@@ -16,25 +16,11 @@
 )

 from autointent import Context
-from autointent.configs import EmbedderConfig
+from autointent.configs import HFModelConfig
 from autointent.custom_types import ListOfLabels
 from autointent.modules.base import BaseScorer


-class TokenizerConfig:
-    """Configuration for tokenizer parameters."""
-
-    def __init__(
-        self,
-        max_length: int = 128,
-        padding: str = "max_length",
-        truncation: bool = True,
-    ) -> None:
-        self.max_length = max_length
-        self.padding = padding
-        self.truncation = truncation
-
-
 class BertScorer(BaseScorer):
     name = "transformer"
     supports_multiclass = True
@@ -45,31 +31,28 @@ class BertScorer(BaseScorer):

     def __init__(
         self,
-        model_config: EmbedderConfig | str | dict[str, Any] | None = None,
+        model_config: HFModelConfig | str | dict[str, Any] | None = None,
         num_train_epochs: int = 3,
         batch_size: int = 8,
         learning_rate: float = 5e-5,
         seed: int = 0,
-        tokenizer_config: TokenizerConfig | None = None,
     ) -> None:
-        self.model_config = EmbedderConfig.from_search_config(model_config)
+        self.model_config = HFModelConfig.from_search_config(model_config)
         self.num_train_epochs = num_train_epochs
         self.batch_size = batch_size
         self.learning_rate = learning_rate
         self.seed = seed
-        self.tokenizer_config = tokenizer_config or TokenizerConfig()
         self._multilabel = False

     @classmethod
     def from_context(
         cls,
         context: Context,
-        model_config: EmbedderConfig | str | None = None,
+        model_config: HFModelConfig | str | dict[str, Any] | None = None,
         num_train_epochs: int = 3,
         batch_size: int = 8,
         learning_rate: float = 5e-5,
         seed: int = 0,
-        tokenizer_config: TokenizerConfig | None = None,
     ) -> "BertScorer":
         if model_config is None:
             model_config = context.resolve_embedder()
@@ -79,7 +62,6 @@ def from_context(
             batch_size=batch_size,
             learning_rate=learning_rate,
             seed=seed,
-            tokenizer_config=tokenizer_config,
         )

     def get_embedder_config(self) -> dict[str, Any]:
@@ -114,10 +96,7 @@ def fit(

         def tokenize_function(examples: dict[str, Any]) -> dict[str, Any]:
             return self._tokenizer(  # type: ignore[no-any-return]
-                examples["text"],
-                padding=self.tokenizer_config.padding,
-                truncation=self.tokenizer_config.truncation,
-                max_length=self.tokenizer_config.max_length,
+                examples["text"], return_tensors="pt", **self.model_config.tokenizer_config.model_dump()
             )

         dataset = Dataset.from_dict({"text": utterances, "labels": labels})
@@ -154,9 +133,7 @@ def predict(self, utterances: list[str]) -> npt.NDArray[Any]:
             msg = "Model is not trained. Call fit() first."
             raise RuntimeError(msg)

-        inputs = self._tokenizer(
-            utterances, padding=True, truncation=True, max_length=self.tokenizer_config.max_length, return_tensors="pt"
-        )
+        inputs = self._tokenizer(utterances, return_tensors="pt", **self.model_config.tokenizer_config.model_dump())

         with torch.no_grad():
             outputs = self._model(**inputs)
diff --git a/autointent/modules/scoring/_mlknn/mlknn.py b/autointent/modules/scoring/_mlknn/mlknn.py
index d6e8f9057..306453010 100644
--- a/autointent/modules/scoring/_mlknn/mlknn.py
+++ b/autointent/modules/scoring/_mlknn/mlknn.py
@@ -140,7 +140,7 @@ def fit(self, utterances: list[str], labels: ListOfLabels) -> None:
                 model_name=self.embedder_config.model_name,
                 device=self.embedder_config.device,
                 batch_size=self.embedder_config.batch_size,
-                max_length=self.embedder_config.max_length,
+                tokenizer_config=self.embedder_config.tokenizer_config,
                 use_cache=self.embedder_config.use_cache,
             ),
         )
diff --git a/autointent/modules/scoring/_sklearn/sklearn_scorer.py b/autointent/modules/scoring/_sklearn/sklearn_scorer.py
index 19e1a635f..8e5ed51c2 100644
--- a/autointent/modules/scoring/_sklearn/sklearn_scorer.py
+++ b/autointent/modules/scoring/_sklearn/sklearn_scorer.py
@@ -128,7 +128,7 @@ def fit(
                 model_name=self.embedder_config.model_name,
                 device=self.embedder_config.device,
                 batch_size=self.embedder_config.batch_size,
-                max_length=self.embedder_config.max_length,
+                tokenizer_config=self.embedder_config.tokenizer_config,
                 use_cache=self.embedder_config.use_cache,
             )
         )
diff --git a/docs/optimizer_config.schema.json b/docs/optimizer_config.schema.json
index aabd685ac..d6a3b595a 100644
--- a/docs/optimizer_config.schema.json
+++ b/docs/optimizer_config.schema.json
@@ -3,6 +3,12 @@
     "CrossEncoderConfig": {
       "additionalProperties": false,
       "properties": {
+        "model_name": {
+          "default": "cross-encoder/ms-marco-MiniLM-L-6-v2",
+          "description": "Name of the hugging face model.",
+          "title": "Model Name",
+          "type": "string"
+        },
         "batch_size": {
           "default": 32,
           "description": "Batch size for model inference.",
@@ -10,26 +16,6 @@
           "title": "Batch Size",
           "type": "integer"
         },
-        "max_length": {
-          "anyOf": [
-            {
-              "exclusiveMinimum": 0,
-              "type": "integer"
-            },
-            {
-              "type": "null"
-            }
-          ],
-          "default": null,
-          "description": "Maximum length of input sequences.",
-          "title": "Max Length"
-        },
-        "model_name": {
-          "default": "cross-encoder/ms-marco-MiniLM-L-6-v2",
-          "description": "Name of the hugging face model.",
-          "title": "Model Name",
-          "type": "string"
-        },
         "device": {
           "anyOf": [
             {
               "type": "string"
             },
             {
               "type": "null"
             }
           ],
@@ -43,6 +29,9 @@
           "default": null,
           "description": "Torch notation for CPU or CUDA.",
           "title": "Device"
         },
+        "tokenizer_config": {
+          "$ref": "#/$defs/TokenizerConfig"
+        },
         "train_head": {
           "default": false,
           "description": "Whether to train the head of the model. If False, LogReg will be trained.",
@@ -104,6 +93,12 @@
     "EmbedderConfig": {
       "additionalProperties": false,
       "properties": {
+        "model_name": {
+          "default": "sentence-transformers/all-MiniLM-L6-v2",
+          "description": "Name of the hugging face model.",
+          "title": "Model Name",
+          "type": "string"
+        },
         "batch_size": {
           "default": 32,
           "description": "Batch size for model inference.",
@@ -111,26 +106,6 @@
           "title": "Batch Size",
           "type": "integer"
         },
-        "max_length": {
-          "anyOf": [
-            {
-              "exclusiveMinimum": 0,
-              "type": "integer"
-            },
-            {
-              "type": "null"
-            }
-          ],
-          "default": null,
-          "description": "Maximum length of input sequences.",
-          "title": "Max Length"
-        },
-        "model_name": {
-          "default": "sentence-transformers/all-MiniLM-L6-v2",
-          "description": "Name of the hugging face model.",
-          "title": "Model Name",
-          "type": "string"
-        },
         "device": {
           "anyOf": [
             {
               "type": "string"
             },
             {
               "type": "null"
             }
           ],
@@ -144,6 +119,9 @@
           "default": null,
           "description": "Torch notation for CPU or CUDA.",
           "title": "Device"
         },
+        "tokenizer_config": {
+          "$ref": "#/$defs/TokenizerConfig"
+        },
         "default_prompt": {
           "anyOf": [
             {
@@ -301,6 +279,48 @@
       },
       "title": "LoggingConfig",
       "type": "object"
+    },
+    "TokenizerConfig": {
+      "properties": {
+        "padding": {
+          "anyOf": [
+            {
+              "type": "boolean"
+            },
+            {
+              "enum": [
+                "longest",
+                "max_length",
+                "do_not_pad"
+              ],
+              "type": "string"
+            }
+          ],
+          "default": true,
+          "title": "Padding"
+        },
+        "truncation": {
+          "default": true,
+          "title": "Truncation",
+          "type": "boolean"
+        },
+        "max_length": {
+          "anyOf": [
+            {
+              "exclusiveMinimum": 0,
+              "type": "integer"
+            },
+            {
+              "type": "null"
+            }
+          ],
+          "default": null,
+          "description": "Maximum length of input sequences.",
+          "title": "Max Length"
+        }
+      },
+      "title": "TokenizerConfig",
+      "type": "object"
+    }
   },
   "description": "Configuration for the optimization process.\n\nOne can use it to customize optimization beyond choosing different preset.\nInstantiate it and pass to :py:meth:`autointent.Pipeline.from_optimization_config`.",
@@ -334,10 +354,14 @@
     "embedder_config": {
       "$ref": "#/$defs/EmbedderConfig",
       "default": {
-        "batch_size": 32,
-        "max_length": null,
         "model_name": "sentence-transformers/all-MiniLM-L6-v2",
+        "batch_size": 32,
         "device": null,
+        "tokenizer_config": {
+          "max_length": null,
+          "padding": true,
+          "truncation": true
+        },
         "default_prompt": null,
         "classifier_prompt": null,
         "cluster_prompt": null,
@@ -350,10 +374,14 @@
     "cross_encoder_config": {
       "$ref": "#/$defs/CrossEncoderConfig",
       "default": {
-        "batch_size": 32,
-        "max_length": null,
         "model_name": "cross-encoder/ms-marco-MiniLM-L-6-v2",
+        "batch_size": 32,
         "device": null,
+        "tokenizer_config": {
+          "max_length": null,
+          "padding": true,
+          "truncation": true
+        },
         "train_head": false
       }
     },
diff --git a/pyproject.toml b/pyproject.toml
index 9a2fad329..316bd2878 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,6 +44,7 @@ dependencies = [
     "datasets (>=3.2.0,<4.0.0)",
     "xxhash (>=3.5.0,<4.0.0)",
     "python-dotenv (>=1.0.1,<2.0.0)",
+    "transformers[torch] (>=4.49.0,<5.0.0)",
 ]

 [project.urls]
diff --git a/tests/callback/test_callback.py b/tests/callback/test_callback.py
index 986b5d348..bbb094f4a 100644
--- a/tests/callback/test_callback.py
+++ b/tests/callback/test_callback.py
@@ -125,7 +125,7 @@ def test_pipeline_callbacks(dataset):
                 "cluster_prompt": None,
                 "default_prompt": None,
                 "device": None,
-                "max_length": None,
+                "tokenizer_config": {"max_length": None, "truncation": True, "padding": True},
                 "model_name": "sergeyzh/rubert-tiny-turbo",
                 "passage_prompt": None,
                 "query_prompt": None,
@@ -151,7 +151,7 @@ def test_pipeline_callbacks(dataset):
                 "cluster_prompt": None,
                 "default_prompt": None,
                 "device": None,
-                "max_length": None,
+                "tokenizer_config": {"max_length": None, "truncation": True, "padding": True},
                 "model_name": "sergeyzh/rubert-tiny-turbo",
                 "passage_prompt": None,
                 "query_prompt": None,
@@ -177,7 +177,7 @@ def test_pipeline_callbacks(dataset):
                 "cluster_prompt": None,
                 "default_prompt": None,
                 "device": None,
-                "max_length": None,
+                "tokenizer_config": {"max_length": None, "truncation": True, "padding": True},
                 "model_name": "sergeyzh/rubert-tiny-turbo",
                 "passage_prompt": None,
                 "query_prompt": None,
diff --git a/tests/pipeline/test_inference.py b/tests/pipeline/test_inference.py
index a5e33c180..683112b49 100644
--- a/tests/pipeline/test_inference.py
+++ b/tests/pipeline/test_inference.py
@@ -1,7 +1,7 @@
 import pytest

 from autointent import Pipeline
-from autointent.configs import EmbedderConfig, LoggingConfig
+from autointent.configs import EmbedderConfig, LoggingConfig, TokenizerConfig
 from autointent.custom_types import NodeType
 from tests.conftest import get_search_space, setup_environment

@@ -99,7 +99,9 @@ def test_load_with_overrided_params(dataset):
     context.dump()

     # case 1: simple inference from file system
-    inference_pipeline = Pipeline.load(logging_config.dirpath, embedder_config=EmbedderConfig(max_length=8))
+    inference_pipeline = Pipeline.load(
+        logging_config.dirpath, embedder_config=EmbedderConfig(tokenizer_config=TokenizerConfig(max_length=8))
+    )
     utterances = ["123", "hello world"]
     prediction = inference_pipeline.predict(utterances)
     assert len(prediction) == 2
@@ -107,17 +109,19 @@ def test_load_with_overrided_params(dataset):
     # case 2: rich inference from file system
     rich_outputs = inference_pipeline.predict_with_metadata(utterances)
     assert len(rich_outputs.predictions) == len(utterances)
-    assert inference_pipeline.nodes[NodeType.scoring].module._embedder.config.max_length == 8
+    assert inference_pipeline.nodes[NodeType.scoring].module._embedder.config.tokenizer_config.max_length == 8
     del inference_pipeline

     # case 3: dump and then load pipeline
     pipeline_optimizer.dump()
     del pipeline_optimizer

-    loaded_pipe = Pipeline.load(logging_config.dirpath, embedder_config=EmbedderConfig(max_length=8))
+    loaded_pipe = Pipeline.load(
+        logging_config.dirpath, embedder_config=EmbedderConfig(tokenizer_config=TokenizerConfig(max_length=8))
+    )
     prediction_v2 = loaded_pipe.predict(utterances)
     assert prediction == prediction_v2
-    assert loaded_pipe.nodes[NodeType.scoring].module._embedder.config.max_length == 8
+    assert loaded_pipe.nodes[NodeType.scoring].module._embedder.config.tokenizer_config.max_length == 8


 def test_no_saving(dataset):