diff --git a/src/lmstudio/json_api.py b/src/lmstudio/json_api.py
index d6c954e..1218eb4 100644
--- a/src/lmstudio/json_api.py
+++ b/src/lmstudio/json_api.py
@@ -162,6 +162,7 @@
     "PredictionRoundResult",
     "PromptProcessingCallback",
     "SerializedLMSExtendedError",
+    "ToolDefinition",
     "ToolFunctionDef",
     "ToolFunctionDefDict",
 ]
@@ -1361,6 +1362,9 @@ def _additional_config_options(cls) -> DictObject:
         return {"for_text_completion": True}
 
 
+ToolDefinition: TypeAlias = ToolFunctionDef | ToolFunctionDefDict | Callable[..., Any]
+
+
 class ChatResponseEndpoint(PredictionEndpoint[TPrediction]):
     """API channel endpoint for requesting a chat response from a model."""
 
@@ -1371,7 +1375,7 @@ class ChatResponseEndpoint(PredictionEndpoint[TPrediction]):
     # TODO: Consider implementing this conversion in _kv_config.py
     @staticmethod
     def parse_tools(
-        tools: Iterable[ToolFunctionDef | ToolFunctionDefDict | Callable[..., Any]],
+        tools: Iterable[ToolDefinition],
     ) -> tuple[LlmToolUseSettingToolArray, ClientToolMap]:
         """Split tool function definitions into server and client details."""
         if not tools:
diff --git a/src/lmstudio/sync_api.py b/src/lmstudio/sync_api.py
index 85467bd..a744b8f 100644
--- a/src/lmstudio/sync_api.py
+++ b/src/lmstudio/sync_api.py
@@ -103,8 +103,7 @@
     RemoteCallHandler,
     TModelInfo,
     TPrediction,
-    ToolFunctionDef,
-    ToolFunctionDefDict,
+    ToolDefinition,
     check_model_namespace,
     load_struct,
     _model_spec_to_api_dict,
@@ -1547,7 +1546,7 @@ def respond(
     def act(
         self,
         chat: Chat | ChatHistoryDataDict | str,
-        tools: Iterable[ToolFunctionDef | ToolFunctionDefDict],
+        tools: Iterable[ToolDefinition],
         *,
         max_prediction_rounds: int | None = None,
         config: LlmPredictionConfig | LlmPredictionConfigDict | None = None,
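
With the `ToolDefinition` alias exported and used in `act()`, the sync API's annotation now matches what `parse_tools` already accepted: a `ToolFunctionDef`, its dict equivalent, or a bare callable. A minimal sketch of the resulting call pattern, assuming the SDK's documented `lms.llm()`/`act()` entry points (the model identifier and prompt are placeholders):

```python
import lmstudio as lms

def multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the product."""
    return a * b

model = lms.llm("qwen2.5-7b-instruct")  # placeholder model identifier

# A plain callable now satisfies Iterable[ToolDefinition]; the SDK derives
# the server-side tool schema from the function's name, docstring, and
# type hints, while keeping the callable itself as the client-side handler.
model.act(
    "What is 12345 multiplied by 54321?",
    [multiply],
    on_message=print,
)
```

Centralizing the union in a single `TypeAlias` in `json_api.py` also keeps the `act()` signature from drifting out of sync with `parse_tools`, which splits each definition into the server-visible tool array and the client-side implementation map.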