@@ -53,6 +53,7 @@
     _ToolCallRequest,
 )
 from .json_api import (
+    ActionResult,
     AnyModelSpecifier,
     AvailableModelBase,
     ChannelEndpoint,
@@ -85,7 +85,6 @@
     ModelSessionTypes,
     ModelTypesEmbedding,
     ModelTypesLlm,
-    OperationResult,
     PredictionEndpoint,
     PredictionFragmentEvent,
     PredictionResult,
@@ -1539,7 +1539,7 @@ def respond(
     # Multi-round predictions are currently a sync-only handle-only feature
     # TODO: Refactor to allow for more code sharing with the async API
     @sdk_public_api()
-    def operate(
+    def act(
         self,
         chat: Chat | ChatHistoryDataDict | str,
         tools: Iterable[ToolFunctionDef | ToolFunctionDefDict],
@@ -1559,14 +1559,14 @@ def operate(
             [LMStudioPredictionError, _ToolCallRequest | None], str
         ]
         | None = None,
-    ) -> OperationResult:
+    ) -> ActionResult:
         """Request a response (with implicit tool use) in an ongoing assistant chat session."""
-        operation_start_time = time.perf_counter()
+        action_start_time = time.perf_counter()
         # It is not yet possible to combine tool calling with requests for structured responses
         response_format = None
         if isinstance(chat, Chat):
             chat._fetch_file_handles(self._session._fetch_file_handle)
-        op_chat: Chat = Chat.from_history(chat)
+        action_chat: Chat = Chat.from_history(chat)
         del chat
         # Multiple rounds, until all tool calls are resolved or limit is reached
         round_counter: Iterable[int]
@@ -1624,7 +1624,7 @@ def _wrapped_on_prompt_processing_progress(progress: float) -> None:
             # * be able to disallow tool use when the rounds are limited
             endpoint = ChatResponseEndpoint(
                 self.identifier,
-                op_chat,
+                action_chat,
                 response_format,
                 config,
                 None,  # Multiple messages are generated per round
@@ -1658,23 +1658,23 @@ def _wrapped_on_prompt_processing_progress(progress: float) -> None:
                     tool_results = [
                         fut.result() for fut in as_completed(pending_tool_calls)
                     ]
-                    requests_message = op_chat._add_assistant_tool_requests(
+                    requests_message = action_chat._add_assistant_tool_requests(
                         prediction, tool_call_requests
                     )
-                    results_message = op_chat._add_tool_results(tool_results)
+                    results_message = action_chat._add_tool_results(tool_results)
                     if on_message is not None:
                         on_message(requests_message)
                         on_message(results_message)
                 elif on_message is not None:
-                    on_message(op_chat.add_assistant_response(prediction))
+                    on_message(action_chat.add_assistant_response(prediction))
                 if on_round_end is not None:
                     on_round_end(round_index)
                 if not tool_call_requests:
                     # No tool call requests -> we're done here
                     break
         num_rounds = round_index + 1
-        duration = time.perf_counter() - operation_start_time
-        return OperationResult(rounds=num_rounds, total_time_seconds=duration)
+        duration = time.perf_counter() - action_start_time
+        return ActionResult(rounds=num_rounds, total_time_seconds=duration)

     @sdk_public_api()
     def apply_prompt_template(
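
Usage note: the sketch below shows how the renamed API reads after this change. It is a minimal example, not the SDK's documented reference usage: only act(), its chat/tools/on_message parameters, and the ActionResult fields (rounds, total_time_seconds) come from the diff above; the lms.llm(), lms.Chat(), chat.add_user_message(), and chat.append helpers plus the model identifier are assumptions based on the project's public examples.

import lmstudio as lms

def add(a: int, b: int) -> int:
    """Add two integers (example tool made available to the model)."""
    return a + b

# Assumed convenience constructors; the model name is a placeholder.
model = lms.llm("qwen2.5-7b-instruct")
chat = lms.Chat("You are a task-focused assistant.")
chat.add_user_message("What is 1234 + 4321? Use the add tool.")

# act() drives the multi-round tool-use loop and returns an ActionResult.
result = model.act(chat, [add], on_message=chat.append)
print(f"Completed in {result.rounds} round(s) over {result.total_time_seconds:.2f}s")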