@@ -909,15 +909,15 @@ async def __aexit__(
         self._set_error(exc_val)
         await self.aclose()

-    async def __aiter__(self) -> AsyncIterator[str]:
+    async def __aiter__(self) -> AsyncIterator[LlmPredictionFragment]:
         endpoint = self._endpoint
         async with self:
             assert self._channel is not None
             async for contents in self._channel.rx_stream():
                 for event in endpoint.iter_message_events(contents):
                     endpoint.handle_rx_event(event)
                     if isinstance(event, PredictionFragmentEvent):
-                        yield event.arg.content
+                        yield event.arg
                 if endpoint.is_finished:
                     break
         self._mark_finished()
@@ -1008,8 +1008,8 @@ async def _complete_stream(
             on_prompt_processing_progress,
         )
         channel_cm = self._create_channel(endpoint)
-        prediction = AsyncPredictionStream(channel_cm, endpoint)
-        return prediction
+        prediction_stream = AsyncPredictionStream(channel_cm, endpoint)
+        return prediction_stream

     @overload
     async def _respond_stream(
@@ -1064,8 +1064,8 @@ async def _respond_stream(
             on_prompt_processing_progress,
         )
         channel_cm = self._create_channel(endpoint)
-        prediction = AsyncPredictionStream(channel_cm, endpoint)
-        return prediction
+        prediction_stream = AsyncPredictionStream(channel_cm, endpoint)
+        return prediction_stream

     async def _apply_prompt_template(
         self,
@@ -1264,7 +1264,7 @@ async def complete(
         on_prompt_processing_progress: Callable[[float], None] | None = None,
     ) -> PredictionResult[str] | PredictionResult[DictObject]:
         """Request a one-off prediction without any context."""
-        prediction = await self._session._complete_stream(
+        prediction_stream = await self._session._complete_stream(
            self.identifier,
            prompt,
            response_format=response_format,
@@ -1274,11 +1274,11 @@ async def complete(
             on_prediction_fragment=on_prediction_fragment,
             on_prompt_processing_progress=on_prompt_processing_progress,
         )
-        async for _ in prediction:
+        async for _ in prediction_stream:
             # No yield in body means iterator reliably provides
             # prompt resource cleanup on coroutine cancellation
             pass
-        return prediction.result()
+        return prediction_stream.result()

     @overload
     async def respond_stream(
@@ -1365,7 +1365,7 @@ async def respond(
         on_prompt_processing_progress: Callable[[float], None] | None = None,
     ) -> PredictionResult[str] | PredictionResult[DictObject]:
         """Request a response in an ongoing assistant chat session."""
-        prediction = await self._session._respond_stream(
+        prediction_stream = await self._session._respond_stream(
            self.identifier,
            history,
            response_format=response_format,
@@ -1375,11 +1375,11 @@ async def respond(
             on_prediction_fragment=on_prediction_fragment,
             on_prompt_processing_progress=on_prompt_processing_progress,
         )
-        async for _ in prediction:
+        async for _ in prediction_stream:
             # No yield in body means iterator reliably provides
             # prompt resource cleanup on coroutine cancellation
             pass
-        return prediction.result()
+        return prediction_stream.result()

     @sdk_public_api_async()
     async def apply_prompt_template(
@@ -1411,7 +1411,7 @@ async def embed(
 TAsyncSession = TypeVar("TAsyncSession", bound=AsyncSession)

 _ASYNC_API_STABILITY_WARNING = """\
-Note: the async API is not yet stable and is expected to change in future releases
+Note the async API is not yet stable and is expected to change in future releases
 """

0 commit comments