@@ -2,12 +2,18 @@
 
 import logging
 
+import anyio
 import pytest
 from pytest import LogCaptureFixture as LogCap
 
-from lmstudio import AsyncClient, LlmLoadModelConfig, history
+from lmstudio import (
+    AsyncClient,
+    LlmLoadModelConfig,
+    LMStudioModelNotFoundError,
+    history,
+)
 
-from ..support import EXPECTED_LLM, EXPECTED_LLM_ID
+from ..support import EXPECTED_LLM, EXPECTED_LLM_ID, check_sdk_error
 
 
 @pytest.mark.asyncio
@@ -52,10 +58,14 @@ async def test_tokenize_async(model_id: str, caplog: LogCap) -> None:
 
     caplog.set_level(logging.DEBUG)
     async with AsyncClient() as client:
-        response = await client.llm._tokenize(model_id, input=text)
+        model = await client.llm.model(model_id)
+        num_tokens = await model.count_tokens(text)
+        response = await model.tokenize(text)
     logging.info(f"Tokenization response: {response}")
     assert response
     assert isinstance(response, list)
+    # Ensure token count and tokenization are consistent
+    assert len(response) == num_tokens
 
 
 @pytest.mark.asyncio
@@ -66,7 +76,8 @@ async def test_tokenize_list_async(model_id: str, caplog: LogCap) -> None:
 
     caplog.set_level(logging.DEBUG)
     async with AsyncClient() as client:
-        response = await client.llm._tokenize(model_id, input=text)
+        model = await client.llm.model(model_id)
+        response = await model.tokenize(text)
     logging.info(f"Tokenization response: {response}")
     assert response
     assert isinstance(response, list)
@@ -109,3 +120,34 @@ async def test_get_model_info_async(model_id: str, caplog: LogCap) -> None:
         response = await client.llm.get_model_info(model_id)
     logging.info(f"Model config response: {response}")
     assert response
+
+
+@pytest.mark.asyncio
+@pytest.mark.lmstudio
+async def test_invalid_model_request_async(caplog: LogCap) -> None:
+    caplog.set_level(logging.DEBUG)
+    async with AsyncClient() as client:
+        # Deliberately create an invalid model handle
+        model = client.llm._create_handle("No such model")
+        # This should error rather than timing out,
+        # but avoid any risk of the client hanging...
+        with anyio.fail_after(30):
+            with pytest.raises(LMStudioModelNotFoundError) as exc_info:
+                await model.complete("Some text")
+            check_sdk_error(exc_info, __file__)
+        with anyio.fail_after(30):
+            with pytest.raises(LMStudioModelNotFoundError) as exc_info:
+                await model.respond("Some text")
+            check_sdk_error(exc_info, __file__)
+        with anyio.fail_after(30):
+            with pytest.raises(LMStudioModelNotFoundError) as exc_info:
+                await model.count_tokens("Some text")
+            check_sdk_error(exc_info, __file__)
+        with anyio.fail_after(30):
+            with pytest.raises(LMStudioModelNotFoundError) as exc_info:
+                await model.tokenize("Some text")
+            check_sdk_error(exc_info, __file__)
+        with anyio.fail_after(30):
+            with pytest.raises(LMStudioModelNotFoundError) as exc_info:
+                await model.get_context_length()
+            check_sdk_error(exc_info, __file__)
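
For reference, the public handle API these tests now exercise can be driven the same way outside the test suite. The sketch below is illustrative only: the model key "my-model" is a placeholder assumption, while AsyncClient, client.llm.model(), count_tokens(), and tokenize() are taken directly from the diff above.

import asyncio

from lmstudio import AsyncClient


async def main() -> None:
    async with AsyncClient() as client:
        # "my-model" is a placeholder; use a model key already loaded in LM Studio
        model = await client.llm.model("my-model")
        text = "Hello, world!"
        # tokenize() returns the token list, count_tokens() returns its length;
        # the updated test asserts these two calls agree
        num_tokens = await model.count_tokens(text)
        tokens = await model.tokenize(text)
        assert len(tokens) == num_tokens
        print(f"{num_tokens} tokens: {tokens}")


asyncio.run(main())

Keeping count_tokens() consistent with len(tokenize(...)) is exactly what the new `assert len(response) == num_tokens` check locks in.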