@@ -690,7 +690,10 @@ def load_new_instance(
         config: TLoadConfig | TLoadConfigDict | None = None,
         on_load_progress: ModelLoadingCallback | None = None,
     ) -> TModelHandle:
-        """Load this model with the given identifier and configuration."""
+        """Load this model with the given identifier and configuration.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         handle: TModelHandle = self._session._load_new_instance(
             self.model_key, instance_identifier, ttl, config, on_load_progress
         )
@@ -704,6 +707,11 @@ def model(
         config: TLoadConfig | TLoadConfigDict | None = None,
         on_load_progress: ModelLoadingCallback | None = None,
     ) -> TModelHandle:
+        """Retrieve the model with the default identifier, or load it with the given configuration.
+
+        Note: the configuration of a retrieved model is NOT checked against the given config.
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         # Call _get_or_load directly, since we have a model identifier
         handle: TModelHandle = self._session._get_or_load(
             self.model_key, ttl, config, on_load_progress
@@ -951,7 +959,11 @@ def model(
         config: TLoadConfig | TLoadConfigDict | None = None,
         on_load_progress: ModelLoadingCallback | None = None,
     ) -> TModelHandle:
-        """Get a handle to the specified model (loading it if necessary)."""
+        """Get a handle to the specified model (loading it if necessary).
+
+        Note: the configuration of a retrieved model is NOT checked against the given config.
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         if model_key is None:
             # Should this raise an error if a config is supplied?
             return self._get_any()
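
The get-or-load semantics make the first note worth illustrating: when the model is already loaded, the supplied config is silently ignored rather than compared. A minimal sketch against this API (the model key and config field name are illustrative, not confirmed values):

    import lmstudio as lms

    client = lms.Client()

    # First call loads the model with the requested config
    # ("contextLength" is an illustrative field name).
    llama = client.llm.model("llama-3.2-1b-instruct", config={"contextLength": 8192})

    # A later call for the same key returns the existing instance unchanged:
    # per the docstring note, the new config is NOT compared against the old one.
    same_llama = client.llm.model("llama-3.2-1b-instruct", config={"contextLength": 2048})
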
@@ -981,7 +993,10 @@ def load_new_instance(
         config: TLoadConfig | TLoadConfigDict | None = None,
         on_load_progress: ModelLoadingCallback | None = None,
     ) -> TModelHandle:
-        """Load the specified model with the given identifier and configuration."""
+        """Load the specified model with the given identifier and configuration.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         return self._load_new_instance(
             model_key, instance_identifier, ttl, config, on_load_progress
         )
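
By contrast, load_new_instance() always loads a fresh copy, so the supplied config is actually applied. A sketch, assuming instance_identifier can be passed by keyword as the signature suggests (all values illustrative):

    import lmstudio as lms

    client = lms.Client()

    # Load a second, independently configured copy alongside any existing instance.
    worker = client.llm.load_new_instance(
        "llama-3.2-1b-instruct",
        instance_identifier="llama-big-ctx",  # hypothetical instance name
        config={"contextLength": 16384},      # illustrative field name
    )
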
@@ -1198,7 +1213,10 @@ def _complete_stream(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionStream[str] | PredictionStream[DictObject]:
-        """Request a one-off prediction without any context and stream the generated tokens."""
+        """Request a one-off prediction without any context and stream the generated tokens.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         endpoint = CompletionEndpoint(
             model_specifier,
             prompt,
@@ -1251,7 +1269,10 @@ def _respond_stream(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionStream[str] | PredictionStream[DictObject]:
-        """Request a response in an ongoing assistant chat session and stream the generated tokens."""
+        """Request a response in an ongoing assistant chat session and stream the generated tokens.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         if not isinstance(history, Chat):
             history = Chat.from_history(history)
         endpoint = ChatResponseEndpoint(
@@ -1417,7 +1438,10 @@ def complete_stream(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionStream[str] | PredictionStream[DictObject]:
-        """Request a one-off prediction without any context and stream the generated tokens."""
+        """Request a one-off prediction without any context and stream the generated tokens.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         return self._session._complete_stream(
             self.identifier,
             prompt,
@@ -1465,7 +1489,10 @@ def complete(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionResult[str] | PredictionResult[DictObject]:
-        """Request a one-off prediction without any context."""
+        """Request a one-off prediction without any context.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         prediction_stream = self._session._complete_stream(
             self.identifier,
             prompt,
@@ -1518,7 +1545,10 @@ def respond_stream(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionStream[str] | PredictionStream[DictObject]:
-        """Request a response in an ongoing assistant chat session and stream the generated tokens."""
+        """Request a response in an ongoing assistant chat session and stream the generated tokens.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         return self._session._respond_stream(
             self.identifier,
             history,
@@ -1566,7 +1596,10 @@ def respond(
         on_prediction_fragment: PredictionFragmentCallback | None = None,
         on_prompt_processing_progress: PromptProcessingCallback | None = None,
     ) -> PredictionResult[str] | PredictionResult[DictObject]:
-        """Request a response in an ongoing assistant chat session."""
+        """Request a response in an ongoing assistant chat session.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         prediction_stream = self._session._respond_stream(
             self.identifier,
             history,
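
Both the blocking and streaming variants carry the same note; in use they differ only in how tokens are consumed. A sketch following the fragment-iteration pattern from the SDK examples (model key illustrative):

    import lmstudio as lms

    model = lms.llm("llama-3.2-1b-instruct")

    # Blocking: respond() drains the underlying stream and returns the full result.
    result = model.respond("Name three prime numbers.")
    print(result)

    # Streaming: consume fragments as they are generated.
    for fragment in model.respond_stream("Now name three composite numbers."):
        print(fragment.content, end="", flush=True)
    print()
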
@@ -1608,7 +1641,10 @@ def act(
         ]
         | None = None,
     ) -> ActResult:
-        """Request a response (with implicit tool use) in an ongoing agent chat session."""
+        """Request a response (with implicit tool use) in an ongoing agent chat session.
+
+        Note: details of configuration fields may change in SDK feature releases.
+        """
         start_time = time.perf_counter()
         # It is not yet possible to combine tool calling with requests for structured responses
         response_format = None
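
For act(), the note covers the prediction config that is reused across each round of the implicit tool-use loop. A minimal sketch in the style of the SDK's tool-use examples (model key illustrative):

    import lmstudio as lms

    def add(a: int, b: int) -> int:
        """Return the sum of a and b."""
        return a + b

    model = lms.llm("qwen2.5-7b-instruct")

    # The model may invoke `add` zero or more times before its final answer;
    # on_message receives each intermediate and final message.
    result = model.act("What is 1234 plus 4321?", [add], on_message=print)
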
@@ -1920,7 +1956,11 @@ def llm(
     ttl: int | None = DEFAULT_TTL,
     config: LlmLoadModelConfig | LlmLoadModelConfigDict | None = None,
 ) -> LLM:
-    """Access an LLM using the default global client."""
+    """Access an LLM using the default global client.
+
+    Note: the configuration of a retrieved model is NOT checked against the given config.
+    Note: details of configuration fields may change in SDK feature releases.
+    """
     return get_default_client().llm.model(model_key, ttl=ttl, config=config)


@@ -1932,7 +1972,11 @@ def embedding_model(
     ttl: int | None = DEFAULT_TTL,
     config: EmbeddingLoadModelConfig | EmbeddingLoadModelConfigDict | None = None,
 ) -> EmbeddingModel:
-    """Access an embedding model using the default global client."""
+    """Access an embedding model using the default global client.
+
+    Note: the configuration of a retrieved model is NOT checked against the given config.
+    Note: details of configuration fields may change in SDK feature releases.
+    """
     return get_default_client().embedding.model(model_key, ttl=ttl, config=config)


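Per the bodies above, both module-level helpers delegate to the default client's session-level model(), so they inherit the same get-or-load behaviour, unchecked config included. A sketch (model keys and config field illustrative):

    import lmstudio as lms

    # Delegates to get_default_client().llm.model(...), as shown above.
    chat_model = lms.llm("llama-3.2-1b-instruct", config={"contextLength": 4096})

    # Delegates to get_default_client().embedding.model(...).
    embedder = lms.embedding_model("nomic-embed-text-v1.5")
    vector = embedder.embed("Hello, world!")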