38 changes: 29 additions & 9 deletions src/lmstudio/async_api.py
@@ -87,6 +87,7 @@
check_model_namespace,
load_struct,
_model_spec_to_api_dict,
_redact_json,
)
from ._kv_config import TLoadConfig, TLoadConfigDict, dict_from_fields_key
from ._sdk_models import (
@@ -368,7 +369,14 @@ async def _send_call(
"""Initiate remote call to specified endpoint."""
self._ensure_connected("send remote procedure call")
call_message = rpc.get_rpc_message(endpoint, params)
self._logger.debug("Sending RPC request", json=call_message)
# TODO: Improve logging for large requests (such as file uploads)
# without requiring explicit special casing here
logged_message: DictObject
if call_message.get("endpoint") == "uploadFileBase64":
logged_message = _redact_json(call_message)
else:
logged_message = call_message
self._logger.debug("Sending RPC request", json=logged_message)
await self._send_json(call_message)

async def remote_call(
Expand Down Expand Up @@ -593,8 +601,15 @@ async def _fetch_file_handle(self, file_data: _LocalFileData) -> FileHandle:
async def prepare_file(
self, src: LocalFileInput, name: str | None = None
) -> FileHandle:
"""Add a file to the server."""
# Private until LM Studio file handle support stabilizes
"""Add a file to the server. Returns a file handle for use in prediction requests."""
file_data = _LocalFileData(src, name)
return await self._fetch_file_handle(file_data)

@sdk_public_api_async()
async def prepare_image(
self, src: LocalFileInput, name: str | None = None
) -> FileHandle:
"""Add an image to the server. Returns a file handle for use in prediction requests."""
file_data = _LocalFileData(src, name)
return await self._fetch_file_handle(file_data)

@@ -672,7 +687,7 @@ def _system_session(self) -> AsyncSessionSystem:

@property
def _files_session(self) -> _AsyncSessionFiles:
return self._client._files
return self._client.files

async def _get_load_config(self, model_specifier: AnyModelSpecifier) -> DictObject:
"""Get the model load config for the specified model."""
@@ -1490,9 +1505,8 @@ def system(self) -> AsyncSessionSystem:
return self._get_session(AsyncSessionSystem)

@property
def _files(self) -> _AsyncSessionFiles:
def files(self) -> _AsyncSessionFiles:
"""Return the files API client session."""
# Private until LM Studio file handle support stabilizes
return self._get_session(_AsyncSessionFiles)

@property
@@ -1505,9 +1519,15 @@ def repository(self) -> AsyncSessionRepository:
async def prepare_file(
self, src: LocalFileInput, name: str | None = None
) -> FileHandle:
"""Add a file to the server."""
# Private until LM Studio file handle support stabilizes
return await self._files.prepare_file(src, name)
"""Add a file to the server. Returns a file handle for use in prediction requests."""
return await self.files.prepare_file(src, name)

@sdk_public_api_async()
async def prepare_image(
self, src: LocalFileInput, name: str | None = None
) -> FileHandle:
"""Add an image to the server. Returns a file handle for use in prediction requests."""
return await self.files.prepare_image(src, name)

@sdk_public_api_async()
async def list_downloaded_models(
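Taken together, the async changes make the files session public (`client.files` rather than `client._files`), add a dedicated `prepare_image()` helper alongside `prepare_file()`, and redact the base64 payload when logging `uploadFileBase64` RPC requests. A minimal usage sketch of the new public surface, assuming a running LM Studio server; the image path and model name below are illustrative:

```python
import asyncio

from lmstudio import AsyncClient, Chat


async def main() -> None:
    async with AsyncClient() as client:
        # Files session is now public: client.files instead of client._files
        image_handle = await client.files.prepare_image("lemmy.png")  # illustrative path
        chat = Chat()
        chat.add_user_message("What is in this image?", images=[image_handle])
        vlm = await client.llm.model("qwen2-vl-2b-instruct")  # illustrative model name
        response = await vlm.respond(chat)
        print(response)


asyncio.run(main())
```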
4 changes: 2 additions & 2 deletions src/lmstudio/history.py
@@ -52,7 +52,7 @@
ChatMessagePartToolCallResultData as ToolCallResultData,
ChatMessagePartToolCallResultDataDict as ToolCallResultDataDict,
FilesRpcUploadFileBase64Parameter,
FileType as FileHandleType,
FileType,
ToolCallRequest as ToolCallRequest,
FunctionToolCallRequestDict as ToolCallRequestDict,
)
@@ -70,7 +70,7 @@
"FileHandle",
"FileHandleDict",
"FileHandleInput",
"FileHandleType",
"FileType",
"SystemPrompt",
"SystemPromptContent",
"ToolCallRequest",
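The externally visible effect of this change is the renamed export: `FileType` replaces `FileHandleType` in `lmstudio.history`. Downstream imports would need a matching update along these lines, assuming no compatibility alias is kept:

```python
# Before: from lmstudio.history import FileHandleType
# After:
from lmstudio.history import FileType


def describe_file_type(file_type: FileType) -> str:
    """Illustrative helper annotated with the renamed export."""
    return f"file type: {file_type}"
```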
5 changes: 5 additions & 0 deletions src/lmstudio/json_api.py
@@ -26,6 +26,7 @@
assert_never,
cast,
get_type_hints,
overload,
)


@@ -490,6 +491,10 @@ class ActResult:
# fmt: on


@overload
def _redact_json(data: DictObject) -> DictObject: ...
@overload
def _redact_json(data: None) -> None: ...
def _redact_json(data: DictObject | None) -> DictObject | None:
"""Show top level structure without any substructure details."""
if data is None:
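The `@overload` declarations let type checkers track `None` through `_redact_json`: a `DictObject` argument yields a `DictObject`, while `None` passes straight through as `None`, so call sites such as the redacted RPC logging above need no extra narrowing. A self-contained sketch of the pattern (the redaction body here is a simplified stand-in, not the SDK's actual implementation):

```python
from typing import Any, Mapping, overload

DictObject = Mapping[str, Any]  # stand-in for the SDK's alias


@overload
def redact_json(data: DictObject) -> DictObject: ...
@overload
def redact_json(data: None) -> None: ...
def redact_json(data: DictObject | None) -> DictObject | None:
    """Show top level structure without any substructure details."""
    if data is None:
        return None
    # Simplified: keep the top-level keys, hide the values
    return {key: "<redacted>" for key in data}


redacted = redact_json({"endpoint": "uploadFileBase64", "content": "..."})
assert redact_json(None) is None  # checkers narrow this call to None, not DictObject
```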
41 changes: 31 additions & 10 deletions src/lmstudio/sync_api.py
@@ -112,6 +112,7 @@
check_model_namespace,
load_struct,
_model_spec_to_api_dict,
_redact_json,
)
from ._kv_config import TLoadConfig, TLoadConfigDict, dict_from_fields_key
from ._sdk_models import (
@@ -548,7 +549,14 @@ def _send_call(
"""Initiate remote call to specified endpoint."""
self._ensure_connected("send remote procedure call")
call_message = rpc.get_rpc_message(endpoint, params)
self._logger.debug("Sending RPC request", json=call_message)
# TODO: Improve logging for large requests (such as file uploads)
# without requiring explicit special casing here
logged_message: DictObject
if call_message.get("endpoint") == "uploadFileBase64":
logged_message = _redact_json(call_message)
else:
logged_message = call_message
self._logger.debug("Sending RPC request", json=logged_message)
self._send_json(call_message)

def remote_call(
@@ -766,8 +774,13 @@ def _fetch_file_handle(self, file_data: _LocalFileData) -> FileHandle:

@sdk_public_api()
def prepare_file(self, src: LocalFileInput, name: str | None = None) -> FileHandle:
"""Add a file to the server."""
# Private until LM Studio file handle support stabilizes
"""Add a file to the server. Returns a file handle for use in prediction requests."""
file_data = _LocalFileData(src, name)
return self._fetch_file_handle(file_data)

@sdk_public_api()
def prepare_image(self, src: LocalFileInput, name: str | None = None) -> FileHandle:
"""Add an image to the server. Returns a file handle for use in prediction requests."""
file_data = _LocalFileData(src, name)
return self._fetch_file_handle(file_data)

@@ -838,7 +851,7 @@ def _system_session(self) -> SyncSessionSystem:

@property
def _files_session(self) -> _SyncSessionFiles:
return self._client._files
return self._client.files

def _get_load_config(self, model_specifier: AnyModelSpecifier) -> DictObject:
"""Get the model load config for the specified model."""
@@ -1806,9 +1819,8 @@ def system(self) -> SyncSessionSystem:
return self._get_session(SyncSessionSystem)

@property
def _files(self) -> _SyncSessionFiles:
def files(self) -> _SyncSessionFiles:
"""Return the files API client session."""
# Private until LM Studio file handle support stabilizes
return self._get_session(_SyncSessionFiles)

@property
@@ -1819,9 +1831,13 @@ def repository(self) -> SyncSessionRepository:
# Convenience methods
@sdk_public_api()
def prepare_file(self, src: LocalFileInput, name: str | None = None) -> FileHandle:
"""Add a file to the server."""
# Private until LM Studio file handle support stabilizes
return self._files.prepare_file(src, name)
"""Add a file to the server. Returns a file handle for use in prediction requests."""
return self.files.prepare_file(src, name)

@sdk_public_api()
def prepare_image(self, src: LocalFileInput, name: str | None = None) -> FileHandle:
"""Add an image to the server. Returns a file handle for use in prediction requests."""
return self.files.prepare_image(src, name)

@sdk_public_api()
def list_downloaded_models(
@@ -1893,10 +1909,15 @@ def embedding_model(
@sdk_public_api()
def prepare_file(src: LocalFileInput, name: str | None = None) -> FileHandle:
"""Add a file to the server using the default global client."""
# Private until LM Studio file handle support stabilizes
return get_default_client().prepare_file(src, name)


@sdk_public_api()
def prepare_image(src: LocalFileInput, name: str | None = None) -> FileHandle:
"""Add an image to the server using the default global client."""
return get_default_client().prepare_image(src, name)


@sdk_public_api()
def list_downloaded_models(
namespace: str | None = None,
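The sync API mirrors the async changes and also gains a module-level `prepare_image()` convenience function backed by the default global client. A minimal sketch of how the pieces fit together, assuming the top-level `Chat` and `llm()` conveniences; the image path and model name are illustrative:

```python
import lmstudio as lms

# Upload via the default global client; the returned handle attaches to a chat message
image_handle = lms.prepare_image("lemmy.png")  # illustrative path

chat = lms.Chat()
chat.add_user_message("What is in this image?", images=[image_handle])

vlm = lms.llm("qwen2-vl-2b-instruct")  # illustrative model name
print(vlm.respond(chat))
```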
44 changes: 31 additions & 13 deletions tests/async/test_images_async.py
@@ -23,37 +23,55 @@
async def test_upload_from_pathlike_async(caplog: LogCap) -> None:
caplog.set_level(logging.DEBUG)
async with AsyncClient() as client:
session = client._files
session = client.files
file = await session.prepare_file(IMAGE_FILEPATH)
assert file
assert isinstance(file, FileHandle)
logging.info(f"Uploaded file: {file}")
image = await session.prepare_image(IMAGE_FILEPATH)
assert image
assert isinstance(image, FileHandle)
logging.info(f"Uploaded image: {image}")
# Even with the same data uploaded, assigned identifiers should differ
assert image != file


@pytest.mark.asyncio
@pytest.mark.lmstudio
async def test_upload_from_file_obj_async(caplog: LogCap) -> None:
caplog.set_level(logging.DEBUG)
async with AsyncClient() as client:
session = client._files
session = client.files
with open(IMAGE_FILEPATH, "rb") as f:
file = await session.prepare_file(f)
assert file
assert isinstance(file, FileHandle)
logging.info(f"Uploaded file: {file}")
with open(IMAGE_FILEPATH, "rb") as f:
image = await session.prepare_image(f)
assert image
assert isinstance(image, FileHandle)
logging.info(f"Uploaded image: {image}")
# Even with the same data uploaded, assigned identifiers should differ
assert image != file


@pytest.mark.asyncio
@pytest.mark.lmstudio
async def test_upload_from_bytesio_async(caplog: LogCap) -> None:
caplog.set_level(logging.DEBUG)
async with AsyncClient() as client:
session = client._files
with open(IMAGE_FILEPATH, "rb") as f:
file = await session.prepare_file(BytesIO(f.read()))
session = client.files
file = await session.prepare_file(BytesIO(IMAGE_FILEPATH.read_bytes()))
assert file
assert isinstance(file, FileHandle)
logging.info(f"Uploaded file: {file}")
image = await session.prepare_image(BytesIO(IMAGE_FILEPATH.read_bytes()))
assert image
assert isinstance(image, FileHandle)
logging.info(f"Uploaded image: {image}")
# Even with the same data uploaded, assigned identifiers should differ
assert image != file


@pytest.mark.asyncio
@@ -64,9 +82,9 @@ async def test_vlm_predict_async(caplog: LogCap) -> None:
caplog.set_level(logging.DEBUG)
model_id = EXPECTED_VLM_ID
async with AsyncClient() as client:
file_handle = await client._files.prepare_file(IMAGE_FILEPATH)
image_handle = await client.files.prepare_image(IMAGE_FILEPATH)
history = Chat()
history.add_user_message((prompt, file_handle))
history.add_user_message((prompt, image_handle))
vlm = await client.llm.model(model_id)
response = await vlm.respond(history, config=SHORT_PREDICTION_CONFIG)
logging.info(f"VLM response: {response!r}")
@@ -84,9 +102,9 @@ async def test_non_vlm_predict_async(caplog: LogCap) -> None:
caplog.set_level(logging.DEBUG)
model_id = "hugging-quants/llama-3.2-1b-instruct"
async with AsyncClient() as client:
file_handle = await client._files.prepare_file(IMAGE_FILEPATH)
image_handle = await client.files.prepare_image(IMAGE_FILEPATH)
history = Chat()
history.add_user_message((prompt, file_handle))
history.add_user_message((prompt, image_handle))
llm = await client.llm.model(model_id)
with pytest.raises(LMStudioServerError) as exc_info:
await llm.respond(history)
@@ -101,9 +119,9 @@ async def test_vlm_predict_image_param_async(caplog: LogCap) -> None:
caplog.set_level(logging.DEBUG)
model_id = EXPECTED_VLM_ID
async with AsyncClient() as client:
file_handle = await client._files.prepare_file(IMAGE_FILEPATH)
image_handle = await client.files.prepare_image(IMAGE_FILEPATH)
history = Chat()
history.add_user_message(prompt, images=[file_handle])
history.add_user_message(prompt, images=[image_handle])
vlm = await client.llm.model(model_id)
response = await vlm.respond(history, config=SHORT_PREDICTION_CONFIG)
logging.info(f"VLM response: {response!r}")
@@ -121,9 +139,9 @@ async def test_non_vlm_predict_image_param_async(caplog: LogCap) -> None:
caplog.set_level(logging.DEBUG)
model_id = "hugging-quants/llama-3.2-1b-instruct"
async with AsyncClient() as client:
file_handle = await client._files.prepare_file(IMAGE_FILEPATH)
image_handle = await client.files.prepare_image(IMAGE_FILEPATH)
history = Chat()
history.add_user_message(prompt, images=[file_handle])
history.add_user_message(prompt, images=[image_handle])
llm = await client.llm.model(model_id)
with pytest.raises(LMStudioServerError) as exc_info:
await llm.respond(history)