Skip to content

Commit 97db620

Browse files
committed
`to_tgi_inferenceendpoint_dict` renamed to `to_tgi_ie_dict`
1 parent e233190 commit 97db620

File tree

3 files changed

+3
-7
lines changed

3 files changed

+3
-7
lines changed

src/lighteval/models/endpoints/endpoint_model.py

Lines changed: 1 addition & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -316,9 +316,7 @@ def __init__( # noqa: C901
316316
model_size=-1,
317317
)
318318
self.generation_parameters = config.generation_parameters
319-
self.generation_config = TextGenerationInputGenerateParameters(
320-
**self.generation_parameters.to_tgi_inferenceendpoint_dict()
321-
)
319+
self.generation_config = TextGenerationInputGenerateParameters(**self.generation_parameters.to_tgi_ie_dict())
322320

323321
@staticmethod
324322
def get_larger_hardware_suggestion(cur_instance_type: str = None, cur_instance_size: str = None):

src/lighteval/models/endpoints/tgi_model.py

Lines changed: 1 addition & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -88,9 +88,7 @@ def __init__(self, config: TGIModelConfig) -> None:
8888

8989
self.client = AsyncClient(config.inference_server_address, headers=headers, timeout=240)
9090
self.generation_parameters = config.generation_parameters
91-
self.generation_config = TextGenerationInputGenerateParameters(
92-
**self.generation_parameters.to_tgi_inferenceendpoint_dict()
93-
)
91+
self.generation_config = TextGenerationInputGenerateParameters(**self.generation_parameters.to_tgi_ie_dict())
9492
self._max_gen_toks = 256
9593
self.model_info = requests.get(f"{config.inference_server_address}/info", headers=headers).json()
9694
if "model_id" not in self.model_info:

src/lighteval/models/model_input.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -97,7 +97,7 @@ def to_transformers_dict(self) -> dict:
9797
}
9898
return {k: v for k, v in args.items() if v is not None}
9999

100-
def to_tgi_inferenceendpoint_dict(self) -> dict:
100+
def to_tgi_ie_dict(self) -> dict:
101101
"""Selects relevant generation and sampling parameters for tgi or inference endpoints models.
102102
Doc: https://huggingface.co/docs/huggingface_hub/v0.26.3/en/package_reference/inference_types#huggingface_hub.TextGenerationInputGenerateParameters
103103

0 commit comments

Comments (0)