15 changes: 5 additions & 10 deletions src/lmstudio/json_api.py
@@ -95,9 +95,7 @@
     LlmTool,
     LlmToolUseSettingToolArray,
     ModelInfo,
-    # TODO: Import this once the definition is fixed in lmstudio.js
-    # https://github.com/lmstudio-ai/lmstudio.js/pull/222
-    # ModelInstanceInfo,
+    ModelInstanceInfo,
     ModelSearchOptsDict,
     ModelSpecifier,
     ModelSpecifierDict,
@@ -197,10 +195,6 @@
     EmbeddingRpcUnloadModelParameter | LlmRpcUnloadModelParameter
 )
 
-# TODO: Drop this workaround once the lmstudio.js schema export is fixed:
-# https://github.com/lmstudio-ai/lmstudio.js/pull/222
-ModelInstanceInfo: TypeAlias = EmbeddingModelInstanceInfo | LlmInstanceInfo
-
 
 class ModelSessionTypes(Generic[TLoadConfig]):
     """Helper class to group related types for code sharing across model namespaces."""
@@ -516,8 +510,8 @@ def __init__(self, logger: StructuredLogger) -> None:
         self._last_channel_id = 0
         self._pending_calls: dict[int, TQueue] = {}
         self._last_call_id = 0
-        # TODO: add `_last_subscriber_id` when adding signal support
-        # TODO: add `_active_subscriptions` when adding signal support
+        # `_active_subscriptions` (if we add signal support)
+        # `_last_subscriber_id` (if we add signal support)
         self._logger = logger
 
     def all_queues(self) -> Iterator[TQueue]:
@@ -1288,6 +1282,7 @@ def request_tool_call(
             )
             result = _ToolCallResultData(content=err_msg, tool_call_id=tool_call_id)
             return lambda: result
+        # Validate parameters against their specification
         params_struct, implementation = client_tool
         raw_kwds = request.arguments
         try:
@@ -1300,7 +1295,7 @@ def request_tool_call(
             return lambda: result
         kwds = to_builtins(parsed_kwds)
 
-        # TODO: Validate parameters against their specification
+        # Allow caller to schedule the tool call request for background execution
         def _call_requested_tool() -> _ToolCallResultData:
             call_result = implementation(**kwds)
             return _ToolCallResultData(
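Annotation (not part of the diff): the `request_tool_call` hunks above follow a lookup → validate → defer pattern: find the registered client tool, validate the request's arguments against the tool's parameter struct, and return a zero-argument callable so the caller can schedule the actual invocation in the background. A rough, self-contained sketch of that pattern; the `ToolCallRequest`/`ToolCallResult` shapes, the `TOOLS` registry, and the example `add` tool are illustrative assumptions, not the SDK's real types:

```python
# Rough sketch of the lookup -> validate -> defer pattern shown in the diff.
# ToolCallRequest, ToolCallResult, and the registry layout are simplified assumptions.
from dataclasses import dataclass
from typing import Any, Callable

from msgspec import Struct, convert, to_builtins


@dataclass
class ToolCallRequest:
    id: str
    name: str
    arguments: dict[str, Any]


@dataclass
class ToolCallResult:
    content: str
    tool_call_id: str


class AddParams(Struct):
    a: int
    b: int


def _add(a: int, b: int) -> int:
    return a + b


TOOLS: dict[str, tuple[type[Struct], Callable[..., Any]]] = {"add": (AddParams, _add)}


def request_tool_call(request: ToolCallRequest) -> Callable[[], ToolCallResult]:
    tool = TOOLS.get(request.name)
    if tool is None:
        result = ToolCallResult(f"Unknown tool: {request.name}", request.id)
        return lambda: result
    params_struct, implementation = tool
    try:
        # Validate the raw arguments against the tool's parameter specification.
        parsed = convert(request.arguments, params_struct)
    except Exception as exc:
        result = ToolCallResult(f"Invalid arguments: {exc}", request.id)
        return lambda: result
    kwds = to_builtins(parsed)

    # Defer execution: the caller decides when (and where) to actually run the tool.
    def _call_requested_tool() -> ToolCallResult:
        return ToolCallResult(str(implementation(**kwds)), request.id)

    return _call_requested_tool
```

Returning the closure instead of calling the tool immediately is what the new comment in the diff refers to: the caller can keep the prediction loop running while the tool call executes in the background.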
4 changes: 0 additions & 4 deletions tests/async/test_model_handles_async.py
@@ -45,10 +45,6 @@ async def test_completion_llm_handle_async(model_id: str, caplog: LogCap) -> Non
     assert response.content
 
 
-# TODO: also test `complete_stream`, `respond`, and `respond_stream`,
-# as all the wrappers are implemented independently of each other
-
-
 @pytest.mark.asyncio
 @pytest.mark.lmstudio
 @pytest.mark.parametrize("model_id", (EXPECTED_EMBEDDING, EXPECTED_EMBEDDING_ID))
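Aside (annotation, not part of the diff): the removed TODO pointed at also covering `complete_stream`, `respond`, and `respond_stream`, since each wrapper is implemented independently. A hedged sketch of what an async streaming-completion test could look like; the `async_llm_handle` fixture is hypothetical, and the assumption that `complete_stream()` is async-iterable yielding fragments with `.content` mirrors the synchronous streaming examples rather than a verified async API:

```python
# Hedged sketch only: `async_llm_handle` is an assumed fixture yielding a loaded
# async LLM handle; the streaming iteration shape is an assumption, not verified.
import pytest


@pytest.mark.asyncio
@pytest.mark.lmstudio
async def test_complete_stream_llm_handle_async(async_llm_handle) -> None:
    fragments: list[str] = []
    async for fragment in async_llm_handle.complete_stream("Hello"):
        fragments.append(fragment.content)
    # A non-empty concatenation is the streaming analogue of `assert response.content`.
    assert "".join(fragments)
```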
4 changes: 0 additions & 4 deletions tests/sync/test_model_handles_sync.py
@@ -51,10 +51,6 @@ def test_completion_llm_handle_sync(model_id: str, caplog: LogCap) -> None:
     assert response.content
 
 
-# TODO: also test `complete_stream`, `respond`, and `respond_stream`,
-# as all the wrappers are implemented independently of each other
-
-
 @pytest.mark.lmstudio
 @pytest.mark.parametrize("model_id", (EXPECTED_EMBEDDING, EXPECTED_EMBEDDING_ID))
 def test_embedding_handle_sync(model_id: str, caplog: LogCap) -> None: