diff --git a/examples/chatbot.py b/examples/chatbot.py
index 58ac2a2..cb1c0ae 100755
--- a/examples/chatbot.py
+++ b/examples/chatbot.py
@@ -3,10 +3,10 @@
 
 import readline  # Enables input line editing
 
-import lmstudio as lm
+import lmstudio as lms
 
-model = lm.llm()
-chat = lm.Chat("You are a task focused AI assistant")
+model = lms.llm()
+chat = lms.Chat("You are a task focused AI assistant")
 
 while True:
     try:
diff --git a/examples/structured-response.py b/examples/structured-response.py
index 8714ef6..c753678 100755
--- a/examples/structured-response.py
+++ b/examples/structured-response.py
@@ -3,15 +3,15 @@
 
 import json
 
-import lmstudio as lm
+import lmstudio as lms
 
-class BookSchema(lm.BaseModel):
+class BookSchema(lms.BaseModel):
     """Structured information about a published book."""
     title: str
     author: str
     year: int
 
-model = lm.llm()
+model = lms.llm()
 
 result = model.respond("Tell me about The Hobbit", response_format=BookSchema)
 book = result.parsed
diff --git a/examples/terminal-sim.py b/examples/terminal-sim.py
index 2eedd17..4709d5e 100755
--- a/examples/terminal-sim.py
+++ b/examples/terminal-sim.py
@@ -3,9 +3,9 @@
 
 import readline  # Enables input line editing
 
-import lmstudio as lm
+import lmstudio as lms
 
-model = lm.llm()
+model = lms.llm()
 console_history = []
 
 while True:
diff --git a/examples/tool-use-multiple.py b/examples/tool-use-multiple.py
index 0901141..3a1c351 100644
--- a/examples/tool-use-multiple.py
+++ b/examples/tool-use-multiple.py
@@ -2,7 +2,7 @@
 """Example script demonstrating agent use of multiple tools."""
 import math
 
-import lmstudio as lm
+import lmstudio as lms
 
 def add(a: int, b: int) -> int:
     """Given two numbers a and b, returns the sum of them."""
@@ -18,7 +18,7 @@ def is_prime(n: int) -> bool:
             return False
     return True
 
-model = lm.llm("qwen2.5-7b-instruct")
+model = lms.llm("qwen2.5-7b-instruct")
 model.act(
     "Is the result of 12345 + 45668 a prime? Think step by step.",
     [add, is_prime],
diff --git a/examples/tool-use.py b/examples/tool-use.py
index adb1422..f7a59d6 100755
--- a/examples/tool-use.py
+++ b/examples/tool-use.py
@@ -1,13 +1,13 @@
 #!/usr/bin/env python
 """Example script demonstrating agent tool use."""
 
-import lmstudio as lm
+import lmstudio as lms
 
 def multiply(a: float, b: float) -> float:
     """Given two numbers a and b. Returns the product of them."""
     return a * b
 
-model = lm.llm("qwen2.5-7b-instruct")
+model = lms.llm("qwen2.5-7b-instruct")
 model.act(
     "What is the result of 12345 multiplied by 54321?",
     [multiply],
diff --git a/tests/load_models.py b/tests/load_models.py
index 532c594..24b033e 100644
--- a/tests/load_models.py
+++ b/tests/load_models.py
@@ -4,7 +4,7 @@
 from contextlib import contextmanager
 from typing import Generator
 
-import lmstudio as lm
+import lmstudio as lms
 
 from .support import (
     EXPECTED_EMBEDDING_ID,
@@ -23,27 +23,27 @@ def print_load_result(model_identifier: str) -> Generator[None, None, None]:
     try:
         yield
-    except lm.LMStudioModelNotFoundError:
+    except lms.LMStudioModelNotFoundError:
         print(f"Load error: {model_identifier!r} is not yet downloaded")
     else:
         print(f"Loaded: {model_identifier!r}")
 
 
-async def _load_llm(client: lm.AsyncClient, model_identifier: str) -> None:
+async def _load_llm(client: lms.AsyncClient, model_identifier: str) -> None:
     with print_load_result(model_identifier):
         await client.llm.load_new_instance(
             model_identifier, config=LLM_LOAD_CONFIG, ttl=None
         )
 
 
-async def _load_embedding_model(client: lm.AsyncClient, model_identifier: str) -> None:
+async def _load_embedding_model(client: lms.AsyncClient, model_identifier: str) -> None:
     with print_load_result(model_identifier):
         await client.embedding.load_new_instance(model_identifier, ttl=None)
 
 
 async def reload_models() -> None:
     await unload_models()
 
-    async with lm.AsyncClient() as client:
+    async with lms.AsyncClient() as client:
         await asyncio.gather(
             _load_llm(client, EXPECTED_LLM_ID),
             _load_llm(client, EXPECTED_VLM_ID),
diff --git a/tests/test_convenience_api.py b/tests/test_convenience_api.py
index c33c07b..e943bf1 100644
--- a/tests/test_convenience_api.py
+++ b/tests/test_convenience_api.py
@@ -3,7 +3,7 @@
 # Note: before testing additional functionality (such as passing configs),
 # this should be migrated to mock-style testing rather than end-to-end
 
-import lmstudio as lm
+import lmstudio as lms
 
 import pytest
 
@@ -17,32 +17,32 @@
 
 @pytest.mark.lmstudio
 def test_get_default_client() -> None:
-    client = lm.get_default_client()
-    assert isinstance(client, lm.Client)
+    client = lms.get_default_client()
+    assert isinstance(client, lms.Client)
 
 
 @pytest.mark.lmstudio
 def test_llm_any() -> None:
-    model = lm.llm()
+    model = lms.llm()
     assert model.identifier in (EXPECTED_LLM_ID, EXPECTED_VLM_ID, TOOL_LLM_ID)
 
 
 @pytest.mark.lmstudio
 @pytest.mark.parametrize("model_id", (EXPECTED_LLM_ID, EXPECTED_VLM_ID, TOOL_LLM_ID))
 def test_llm_specific(model_id: str) -> None:
-    model = lm.llm(model_id)
+    model = lms.llm(model_id)
     assert model.identifier == model_id
 
 
 @pytest.mark.lmstudio
 def test_embedding_any() -> None:
-    model = lm.embedding_model()
+    model = lms.embedding_model()
     assert model.identifier == EXPECTED_EMBEDDING_ID
 
 
 @pytest.mark.lmstudio
 def test_embedding_specific() -> None:
-    model = lm.embedding_model(EXPECTED_EMBEDDING_ID)
+    model = lms.embedding_model(EXPECTED_EMBEDDING_ID)
     assert model.identifier == EXPECTED_EMBEDDING_ID
 
 
@@ -51,34 +51,34 @@ def test_add_temp_file() -> None:
     # API is private until LM Studio file handle support stabilizes
     name = "example-file"
     raw_data = b"raw data"
-    file_handle = lm.sync_api._add_temp_file(raw_data, name)
+    file_handle = lms.sync_api._add_temp_file(raw_data, name)
     assert file_handle.name == name
     assert file_handle.size_bytes == len(raw_data)
 
 
 @pytest.mark.lmstudio
 def test_list_downloaded_models() -> None:
-    all_models = [m.model_key for m in lm.list_downloaded_models()]
-    embedding_models = [m.model_key for m in lm.list_downloaded_models("embedding")]
-    llms = [m.model_key for m in lm.list_downloaded_models("llm")]
+    all_models = [m.model_key for m in lms.list_downloaded_models()]
+    embedding_models = [m.model_key for m in lms.list_downloaded_models("embedding")]
+    llms = [m.model_key for m in lms.list_downloaded_models("llm")]
     assert set(all_models) == (set(embedding_models) | set(llms))
 
 
 @pytest.mark.lmstudio
 def test_list_loaded_models() -> None:
-    all_models = [m.identifier for m in lm.list_loaded_models()]
-    embedding_models = [m.identifier for m in lm.list_loaded_models("embedding")]
-    llms = [m.identifier for m in lm.list_loaded_models("llm")]
+    all_models = [m.identifier for m in lms.list_loaded_models()]
+    embedding_models = [m.identifier for m in lms.list_loaded_models("embedding")]
+    llms = [m.identifier for m in lms.list_loaded_models("llm")]
     assert set(all_models) == (set(embedding_models) | set(llms))
 
 
 @pytest.mark.lmstudio
 def test_list_loaded_embedding_models() -> None:
-    models = [m.identifier for m in lm.list_loaded_models("embedding")]
+    models = [m.identifier for m in lms.list_loaded_models("embedding")]
     assert not (set((EXPECTED_EMBEDDING_ID,)) - set(models))
 
 
 @pytest.mark.lmstudio
 def test_list_loaded_LLMs() -> None:
-    models = [m.identifier for m in lm.list_loaded_models("llm")]
+    models = [m.identifier for m in lms.list_loaded_models("llm")]
     assert not (set((EXPECTED_LLM_ID, EXPECTED_VLM_ID, TOOL_LLM_ID)) - set(models))
diff --git a/tests/unload_models.py b/tests/unload_models.py
index 7c085de..675651e 100644
--- a/tests/unload_models.py
+++ b/tests/unload_models.py
@@ -1,7 +1,7 @@
 """Unload the models required by the test suite."""
 import asyncio
 
-import lmstudio as lm
+import lmstudio as lms
 
 from .support import (
     EXPECTED_EMBEDDING_ID,
@@ -10,20 +10,20 @@
     TOOL_LLM_ID,
 )
 
-AsyncSessionModel = lm.async_api.AsyncSessionEmbedding | lm.async_api.AsyncSessionLlm
+AsyncSessionModel = lms.async_api.AsyncSessionEmbedding | lms.async_api.AsyncSessionLlm
 
 
 async def _unload_model(session: AsyncSessionModel, model_identifier: str) -> None:
     try:
         await session.unload(model_identifier)
-    except lm.LMStudioModelNotFoundError:
+    except lms.LMStudioModelNotFoundError:
         print(f"Unloaded: {model_identifier!r} (model was not loaded)")
     else:
         print(f"Unloaded: {model_identifier!r}")
 
 
 async def unload_models() -> None:
-    async with lm.AsyncClient() as client:
+    async with lms.AsyncClient() as client:
         await asyncio.gather(
             _unload_model(client.llm, EXPECTED_LLM_ID),
             _unload_model(client.llm, EXPECTED_VLM_ID),