
Commit 8f295f9

Add docstring caveats on config API stability
1 parent 942ba8b commit 8f295f9

File tree: 2 files changed (+98 -21 lines)

  src/lmstudio/async_api.py
  src/lmstudio/sync_api.py

src/lmstudio/async_api.py

Lines changed: 42 additions & 9 deletions
@@ -502,7 +502,10 @@ async def load_new_instance(
         config: TLoadConfig | TLoadConfigDict | None = None,
         on_load_progress: ModelLoadingCallback | None = None,
     ) -> TAsyncModelHandle:
-        """Load this model with the given identifier and configuration."""
+        """Load this model with the given identifier and configuration.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         handle: TAsyncModelHandle = await self._session._load_new_instance(
             self.model_key, instance_identifier, ttl, config, on_load_progress
         )
@@ -516,6 +519,11 @@ async def model(
         config: TLoadConfig | TLoadConfigDict | None = None,
         on_load_progress: ModelLoadingCallback | None = None,
     ) -> TAsyncModelHandle:
+        """Retrieve model with given identifier, or load it with given configuration.
+
+        Note: configuration of retrieved model is NOT checked against the given config.
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         # Call _get_or_load directly, since we have a model identifier
         handle: TAsyncModelHandle = await self._session._get_or_load(
             self.model_key, ttl, config, on_load_progress
@@ -786,7 +794,11 @@ async def model(
         config: TLoadConfig | TLoadConfigDict | None = None,
         on_load_progress: ModelLoadingCallback | None = None,
     ) -> TAsyncModelHandle:
-        """Get a handle to the specified model (loading it if necessary)."""
+        """Get a handle to the specified model (loading it if necessary).
+
+        Note: configuration of retrieved model is NOT checked against the given config.
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         if model_key is None:
             # Should this raise an error if a config is supplied?
             return await self._get_any()
@@ -816,7 +828,10 @@ async def load_new_instance(
         config: TLoadConfig | TLoadConfigDict | None = None,
         on_load_progress: ModelLoadingCallback | None = None,
     ) -> TAsyncModelHandle:
-        """Load the specified model with the given identifier and configuration."""
+        """Load the specified model with the given identifier and configuration.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         return await self._load_new_instance(
             model_key, instance_identifier, ttl, config, on_load_progress
         )
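
In practice the two load-time caveats above differ in one concrete way: model() is get-or-load, so a config passed there is only applied (and only validated) when the call actually loads the model, while load_new_instance() always creates a fresh instance and always applies it. A minimal async sketch of that distinction, assuming lms.AsyncClient mirrors the sync client as an async context manager; the model key and the contextLength field are illustrative, not taken from this commit:

    import asyncio

    import lmstudio as lms

    async def main() -> None:
        # Assumption: lms.AsyncClient mirrors lms.Client as an async context manager.
        async with lms.AsyncClient() as client:
            # Get-or-load: if "my-model-key" is already loaded, this returns the
            # existing instance and the given config is NOT checked against it.
            model = await client.llm.model(
                "my-model-key",  # hypothetical model key
                config={"contextLength": 4096},  # illustrative config field
            )
            # Explicit load: a new instance is always created, so the config
            # is applied unconditionally.
            fresh = await client.llm.load_new_instance(
                "my-model-key",
                config={"contextLength": 4096},
            )
            print(model.identifier, fresh.identifier)

    asyncio.run(main())
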
@@ -1033,7 +1048,10 @@ async def _complete_stream(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> AsyncPredictionStream[str] | AsyncPredictionStream[DictObject]:
-        """Request a one-off prediction without any context and stream the generated tokens."""
+        """Request a one-off prediction without any context and stream the generated tokens.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         endpoint = CompletionEndpoint(
             model_specifier,
             prompt,
@@ -1086,7 +1104,10 @@ async def _respond_stream(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> AsyncPredictionStream[str] | AsyncPredictionStream[DictObject]:
-        """Request a response in an ongoing assistant chat session and stream the generated tokens."""
+        """Request a response in an ongoing assistant chat session and stream the generated tokens.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         if not isinstance(history, Chat):
             history = Chat.from_history(history)
         endpoint = ChatResponseEndpoint(
@@ -1256,7 +1277,10 @@ async def complete_stream(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> AsyncPredictionStream[str] | AsyncPredictionStream[DictObject]:
-        """Request a one-off prediction without any context and stream the generated tokens."""
+        """Request a one-off prediction without any context and stream the generated tokens.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         return await self._session._complete_stream(
             self.identifier,
             prompt,
@@ -1304,7 +1328,10 @@ async def complete(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionResult[str] | PredictionResult[DictObject]:
-        """Request a one-off prediction without any context."""
+        """Request a one-off prediction without any context.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         prediction_stream = await self._session._complete_stream(
             self.identifier,
             prompt,
@@ -1357,7 +1384,10 @@ async def respond_stream(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> AsyncPredictionStream[str] | AsyncPredictionStream[DictObject]:
-        """Request a response in an ongoing assistant chat session and stream the generated tokens."""
+        """Request a response in an ongoing assistant chat session and stream the generated tokens.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         return await self._session._respond_stream(
             self.identifier,
             history,
@@ -1405,7 +1435,10 @@ async def respond(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionResult[str] | PredictionResult[DictObject]:
-        """Request a response in an ongoing assistant chat session."""
+        """Request a response in an ongoing assistant chat session.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         prediction_stream = await self._session._respond_stream(
             self.identifier,
             history,
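
The same stability caveat covers the per-request prediction configs accepted by complete() and respond(). A short sketch under the same AsyncClient assumption; the temperature field and the result.content attribute are illustrative of the prediction config and result API rather than confirmed by this commit:

    import asyncio

    import lmstudio as lms

    async def main() -> None:
        async with lms.AsyncClient() as client:  # assumption, as above
            model = await client.llm.model("my-model-key")  # hypothetical model key
            chat = lms.Chat("You are a terse assistant.")
            chat.add_user_message("What is 6 * 7?")
            # Prediction config fields, like load config fields, may change
            # between SDK feature releases, so pin the SDK version if you rely on them.
            result = await model.respond(chat, config={"temperature": 0.7})
            print(result.content)

    asyncio.run(main())
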

src/lmstudio/sync_api.py

Lines changed: 56 additions & 12 deletions
@@ -690,7 +690,10 @@ def load_new_instance(
         config: TLoadConfig | TLoadConfigDict | None = None,
         on_load_progress: ModelLoadingCallback | None = None,
     ) -> TModelHandle:
-        """Load this model with the given identifier and configuration."""
+        """Load this model with the given identifier and configuration.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         handle: TModelHandle = self._session._load_new_instance(
             self.model_key, instance_identifier, ttl, config, on_load_progress
         )
@@ -704,6 +707,11 @@ def model(
         config: TLoadConfig | TLoadConfigDict | None = None,
         on_load_progress: ModelLoadingCallback | None = None,
     ) -> TModelHandle:
+        """Retrieve model with default identifier, or load it with given configuration.
+
+        Note: configuration of retrieved model is NOT checked against the given config.
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         # Call _get_or_load directly, since we have a model identifier
         handle: TModelHandle = self._session._get_or_load(
             self.model_key, ttl, config, on_load_progress
@@ -951,7 +959,11 @@ def model(
         config: TLoadConfig | TLoadConfigDict | None = None,
         on_load_progress: ModelLoadingCallback | None = None,
     ) -> TModelHandle:
-        """Get a handle to the specified model (loading it if necessary)."""
+        """Get a handle to the specified model (loading it if necessary).
+
+        Note: configuration of retrieved model is NOT checked against the given config.
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         if model_key is None:
             # Should this raise an error if a config is supplied?
             return self._get_any()
@@ -981,7 +993,10 @@ def load_new_instance(
         config: TLoadConfig | TLoadConfigDict | None = None,
         on_load_progress: ModelLoadingCallback | None = None,
     ) -> TModelHandle:
-        """Load the specified model with the given identifier and configuration."""
+        """Load the specified model with the given identifier and configuration.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         return self._load_new_instance(
             model_key, instance_identifier, ttl, config, on_load_progress
         )
@@ -1198,7 +1213,10 @@ def _complete_stream(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionStream[str] | PredictionStream[DictObject]:
-        """Request a one-off prediction without any context and stream the generated tokens."""
+        """Request a one-off prediction without any context and stream the generated tokens.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         endpoint = CompletionEndpoint(
             model_specifier,
             prompt,
@@ -1251,7 +1269,10 @@ def _respond_stream(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionStream[str] | PredictionStream[DictObject]:
-        """Request a response in an ongoing assistant chat session and stream the generated tokens."""
+        """Request a response in an ongoing assistant chat session and stream the generated tokens.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         if not isinstance(history, Chat):
             history = Chat.from_history(history)
         endpoint = ChatResponseEndpoint(
@@ -1417,7 +1438,10 @@ def complete_stream(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionStream[str] | PredictionStream[DictObject]:
-        """Request a one-off prediction without any context and stream the generated tokens."""
+        """Request a one-off prediction without any context and stream the generated tokens.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         return self._session._complete_stream(
             self.identifier,
             prompt,
@@ -1465,7 +1489,10 @@ def complete(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionResult[str] | PredictionResult[DictObject]:
-        """Request a one-off prediction without any context."""
+        """Request a one-off prediction without any context.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         prediction_stream = self._session._complete_stream(
             self.identifier,
             prompt,
@@ -1518,7 +1545,10 @@ def respond_stream(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionStream[str] | PredictionStream[DictObject]:
-        """Request a response in an ongoing assistant chat session and stream the generated tokens."""
+        """Request a response in an ongoing assistant chat session and stream the generated tokens.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         return self._session._respond_stream(
             self.identifier,
             history,
@@ -1566,7 +1596,10 @@ def respond(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionResult[str] | PredictionResult[DictObject]:
-        """Request a response in an ongoing assistant chat session."""
+        """Request a response in an ongoing assistant chat session.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         prediction_stream = self._session._respond_stream(
             self.identifier,
             history,
@@ -1608,7 +1641,10 @@ def act(
         ]
         | None = None,
     ) -> ActResult:
-        """Request a response (with implicit tool use) in an ongoing agent chat session."""
+        """Request a response (with implicit tool use) in an ongoing agent chat session.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         start_time = time.perf_counter()
         # It is not yet possible to combine tool calling with requests for structured responses
         response_format = None
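
act() is the agent-oriented variant of the same prediction API: tools are plain Python functions, and the caveat on configuration fields applies to its prediction config as well. A minimal sketch with a hypothetical model key:

    import lmstudio as lms

    def multiply(a: float, b: float) -> float:
        """Given two numbers a and b, return their product."""
        return a * b

    model = lms.llm("my-model-key")  # hypothetical model key
    model.act(
        "What is 12345 multiplied by 54321?",
        [multiply],
        on_message=print,  # print each completed message in the tool-call loop
    )
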
@@ -1920,7 +1956,11 @@ def llm(
     ttl: int | None = DEFAULT_TTL,
     config: LlmLoadModelConfig | LlmLoadModelConfigDict | None = None,
 ) -> LLM:
-    """Access an LLM using the default global client."""
+    """Access an LLM using the default global client.
+
+    Note: configuration of retrieved model is NOT checked against the given config.
+    Note: details of configuration fields may change in SDK feature releases.
+    """
     return get_default_client().llm.model(model_key, ttl=ttl, config=config)


@@ -1932,7 +1972,11 @@ def embedding_model(
     ttl: int | None = DEFAULT_TTL,
     config: EmbeddingLoadModelConfig | EmbeddingLoadModelConfigDict | None = None,
 ) -> EmbeddingModel:
-    """Access an embedding model using the default global client."""
+    """Access an embedding model using the default global client.
+
+    Note: configuration of retrieved model is NOT checked against the given config.
+    Note: details of configuration fields may change in SDK feature releases.
+    """
     return get_default_client().embedding.model(model_key, ttl=ttl, config=config)

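Both module-level convenience functions delegate to the default client's model() methods, so they inherit the get-or-load behaviour documented above: the supplied ttl and config only take effect if the call actually loads the model. A sketch with a hypothetical model key and an illustrative config field:

    import lmstudio as lms

    # Get-or-load via the default global client: if the model is already loaded,
    # ttl and config are NOT checked against the running instance.
    model = lms.llm(
        "my-model-key",  # hypothetical model key
        ttl=300,  # seconds of idle time before the instance may be unloaded
        config={"contextLength": 8192},  # illustrative config field
    )
    print(model.respond("Say hello in five words."))
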