Merged
examples/chatbot.py (6 changes: 3 additions & 3 deletions)
@@ -3,10 +3,10 @@
 
 import readline # Enables input line editing
 
-import lmstudio as lm
+import lmstudio as lms
 
-model = lm.llm()
-chat = lm.Chat("You are a task focused AI assistant")
+model = lms.llm()
+chat = lms.Chat("You are a task focused AI assistant")
 
 while True:
     try:
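
The body of the chat loop is collapsed in this diff. For context, a minimal sketch of how such a loop typically continues with this API under the renamed alias; the prompt text, the chat.append callback, and the fragment streaming details are assumptions, not part of the visible diff:

    while True:
        try:
            user_input = input("You (leave blank to exit): ")
        except EOFError:
            print()
            break
        if not user_input:
            break
        # Record the user turn, then stream the assistant reply back into the chat.
        chat.add_user_message(user_input)
        prediction_stream = model.respond_stream(chat, on_message=chat.append)
        print("Bot: ", end="", flush=True)
        for fragment in prediction_stream:
            print(fragment.content, end="", flush=True)
        print()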
examples/structured-response.py (6 changes: 3 additions & 3 deletions)
@@ -3,15 +3,15 @@
 
 import json
 
-import lmstudio as lm
+import lmstudio as lms
 
-class BookSchema(lm.BaseModel):
+class BookSchema(lms.BaseModel):
     """Structured information about a published book."""
     title: str
     author: str
     year: int
 
-model = lm.llm()
+model = lms.llm()
 
 result = model.respond("Tell me about The Hobbit", response_format=BookSchema)
 book = result.parsed
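
The tail of this example is collapsed. Since the script imports json, the truncated portion presumably prints the parsed result; a hypothetical ending, assuming result.parsed yields a dict shaped like BookSchema:

    # Hypothetical tail: pretty-print the schema-shaped dict parsed from the response.
    print(json.dumps(book, indent=2))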
examples/terminal-sim.py (4 changes: 2 additions & 2 deletions)
@@ -3,9 +3,9 @@
 
 import readline # Enables input line editing
 
-import lmstudio as lm
+import lmstudio as lms
 
-model = lm.llm()
+model = lms.llm()
 console_history = []
 
 while True:
examples/tool-use-multiple.py (4 changes: 2 additions & 2 deletions)
@@ -2,7 +2,7 @@
 """Example script demonstrating agent use of multiple tools."""
 
 import math
-import lmstudio as lm
+import lmstudio as lms
 
 def add(a: int, b: int) -> int:
     """Given two numbers a and b, returns the sum of them."""
@@ -18,7 +18,7 @@ def is_prime(n: int) -> bool:
         return False
     return True
 
-model = lm.llm("qwen2.5-7b-instruct")
+model = lms.llm("qwen2.5-7b-instruct")
 model.act(
     "Is the result of 12345 + 45668 a prime? Think step by step.",
     [add, is_prime],
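
Only the tail of is_prime appears in the diff. A plausible full definition, reconstructed from the visible import math and the two return statements (the loop bound is an assumption):

    def is_prime(n: int) -> bool:
        """Given a number n, returns True if it is a prime number."""
        if n < 2:
            return False
        # Trial division up to the square root is enough to rule out factors.
        for i in range(2, int(math.sqrt(n)) + 1):
            if n % i == 0:
                return False
        return True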
examples/tool-use.py (4 changes: 2 additions & 2 deletions)
@@ -1,13 +1,13 @@
 #!/usr/bin/env python
 """Example script demonstrating agent tool use."""
 
-import lmstudio as lm
+import lmstudio as lms
 
 def multiply(a: float, b: float) -> float:
     """Given two numbers a and b. Returns the product of them."""
     return a * b
 
-model = lm.llm("qwen2.5-7b-instruct")
+model = lms.llm("qwen2.5-7b-instruct")
 model.act(
     "What is the result of 12345 multiplied by 54321?",
     [multiply],
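
For context, a minimal runnable version of this example after the rename. The closing of the model.act(...) call and the on_message=print callback are assumptions, since the diff is truncated below [multiply],:

    import lmstudio as lms

    def multiply(a: float, b: float) -> float:
        """Given two numbers a and b. Returns the product of them."""
        return a * b

    # Assumes qwen2.5-7b-instruct is already downloaded in LM Studio.
    model = lms.llm("qwen2.5-7b-instruct")
    model.act(
        "What is the result of 12345 multiplied by 54321?",
        [multiply],
        on_message=print,  # assumed callback: print each agent message as it arrives
    )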
tests/load_models.py (10 changes: 5 additions & 5 deletions)
@@ -4,7 +4,7 @@
 from contextlib import contextmanager
 from typing import Generator
 
-import lmstudio as lm
+import lmstudio as lms
 
 from .support import (
     EXPECTED_EMBEDDING_ID,
@@ -23,27 +23,27 @@
 def print_load_result(model_identifier: str) -> Generator[None, None, None]:
     try:
         yield
-    except lm.LMStudioModelNotFoundError:
+    except lms.LMStudioModelNotFoundError:
         print(f"Load error: {model_identifier!r} is not yet downloaded")
     else:
         print(f"Loaded: {model_identifier!r}")
 
 
-async def _load_llm(client: lm.AsyncClient, model_identifier: str) -> None:
+async def _load_llm(client: lms.AsyncClient, model_identifier: str) -> None:
     with print_load_result(model_identifier):
         await client.llm.load_new_instance(
             model_identifier, config=LLM_LOAD_CONFIG, ttl=None
         )
 
 
-async def _load_embedding_model(client: lm.AsyncClient, model_identifier: str) -> None:
+async def _load_embedding_model(client: lms.AsyncClient, model_identifier: str) -> None:
     with print_load_result(model_identifier):
         await client.embedding.load_new_instance(model_identifier, ttl=None)
 
 
 async def reload_models() -> None:
     await unload_models()
-    async with lm.AsyncClient() as client:
+    async with lms.AsyncClient() as client:
         await asyncio.gather(
             _load_llm(client, EXPECTED_LLM_ID),
             _load_llm(client, EXPECTED_VLM_ID),
tests/test_convenience_api.py (32 changes: 16 additions & 16 deletions)
@@ -3,7 +3,7 @@
 # Note: before testing additional functionality (such as passing configs),
 # this should be migrated to mock-style testing rather than end-to-end
 
-import lmstudio as lm
+import lmstudio as lms
 
 import pytest
 
@@ -17,32 +17,32 @@
 
 @pytest.mark.lmstudio
 def test_get_default_client() -> None:
-    client = lm.get_default_client()
-    assert isinstance(client, lm.Client)
+    client = lms.get_default_client()
+    assert isinstance(client, lms.Client)
 
 
 @pytest.mark.lmstudio
 def test_llm_any() -> None:
-    model = lm.llm()
+    model = lms.llm()
     assert model.identifier in (EXPECTED_LLM_ID, EXPECTED_VLM_ID, TOOL_LLM_ID)
 
 
 @pytest.mark.lmstudio
 @pytest.mark.parametrize("model_id", (EXPECTED_LLM_ID, EXPECTED_VLM_ID, TOOL_LLM_ID))
 def test_llm_specific(model_id: str) -> None:
-    model = lm.llm(model_id)
+    model = lms.llm(model_id)
     assert model.identifier == model_id
 
 
 @pytest.mark.lmstudio
 def test_embedding_any() -> None:
-    model = lm.embedding_model()
+    model = lms.embedding_model()
     assert model.identifier == EXPECTED_EMBEDDING_ID
 
 
 @pytest.mark.lmstudio
 def test_embedding_specific() -> None:
-    model = lm.embedding_model(EXPECTED_EMBEDDING_ID)
+    model = lms.embedding_model(EXPECTED_EMBEDDING_ID)
     assert model.identifier == EXPECTED_EMBEDDING_ID
 
 
@@ -51,34 +51,34 @@ def test_add_temp_file() -> None:
     # API is private until LM Studio file handle support stabilizes
     name = "example-file"
     raw_data = b"raw data"
-    file_handle = lm.sync_api._add_temp_file(raw_data, name)
+    file_handle = lms.sync_api._add_temp_file(raw_data, name)
     assert file_handle.name == name
     assert file_handle.size_bytes == len(raw_data)
 
 
 @pytest.mark.lmstudio
 def test_list_downloaded_models() -> None:
-    all_models = [m.model_key for m in lm.list_downloaded_models()]
-    embedding_models = [m.model_key for m in lm.list_downloaded_models("embedding")]
-    llms = [m.model_key for m in lm.list_downloaded_models("llm")]
+    all_models = [m.model_key for m in lms.list_downloaded_models()]
+    embedding_models = [m.model_key for m in lms.list_downloaded_models("embedding")]
+    llms = [m.model_key for m in lms.list_downloaded_models("llm")]
     assert set(all_models) == (set(embedding_models) | set(llms))
 
 
 @pytest.mark.lmstudio
 def test_list_loaded_models() -> None:
-    all_models = [m.identifier for m in lm.list_loaded_models()]
-    embedding_models = [m.identifier for m in lm.list_loaded_models("embedding")]
-    llms = [m.identifier for m in lm.list_loaded_models("llm")]
+    all_models = [m.identifier for m in lms.list_loaded_models()]
+    embedding_models = [m.identifier for m in lms.list_loaded_models("embedding")]
+    llms = [m.identifier for m in lms.list_loaded_models("llm")]
     assert set(all_models) == (set(embedding_models) | set(llms))
 
 
 @pytest.mark.lmstudio
 def test_list_loaded_embedding_models() -> None:
-    models = [m.identifier for m in lm.list_loaded_models("embedding")]
+    models = [m.identifier for m in lms.list_loaded_models("embedding")]
     assert not (set((EXPECTED_EMBEDDING_ID,)) - set(models))
 
 
 @pytest.mark.lmstudio
 def test_list_loaded_LLMs() -> None:
-    models = [m.identifier for m in lm.list_loaded_models("llm")]
+    models = [m.identifier for m in lms.list_loaded_models("llm")]
     assert not (set((EXPECTED_LLM_ID, EXPECTED_VLM_ID, TOOL_LLM_ID)) - set(models))
tests/unload_models.py (8 changes: 4 additions & 4 deletions)
@@ -1,7 +1,7 @@
 """Unload the models required by the test suite."""
 
 import asyncio
-import lmstudio as lm
+import lmstudio as lms
 
 from .support import (
     EXPECTED_EMBEDDING_ID,
@@ -10,20 +10,20 @@
     TOOL_LLM_ID,
 )
 
-AsyncSessionModel = lm.async_api.AsyncSessionEmbedding | lm.async_api.AsyncSessionLlm
+AsyncSessionModel = lms.async_api.AsyncSessionEmbedding | lms.async_api.AsyncSessionLlm
 
 
 async def _unload_model(session: AsyncSessionModel, model_identifier: str) -> None:
     try:
         await session.unload(model_identifier)
-    except lm.LMStudioModelNotFoundError:
+    except lms.LMStudioModelNotFoundError:
         print(f"Unloaded: {model_identifier!r} (model was not loaded)")
     else:
         print(f"Unloaded: {model_identifier!r}")
 
 
 async def unload_models() -> None:
-    async with lm.AsyncClient() as client:
+    async with lms.AsyncClient() as client:
         await asyncio.gather(
             _unload_model(client.llm, EXPECTED_LLM_ID),
             _unload_model(client.llm, EXPECTED_VLM_ID),