11 files changed: +67 −19 lines
ChatBedrockAnthropic example app:

@@ -13,7 +13,7 @@
 # them in a file named `.env`. The `python-dotenv` package will load `.env` as
 # environment variables which can be read by `os.getenv()`.
 load_dotenv()
-chat_model = ChatBedrockAnthropic(
+chat_client = ChatBedrockAnthropic(
     model="anthropic.claude-3-sonnet-20240229-v1:0",
 )

@@ -32,5 +32,5 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
ChatAzureOpenAI example app:

@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatAzureOpenAI.html
 load_dotenv()
-chat_model = ChatAzureOpenAI(
+chat_client = ChatAzureOpenAI(
     api_key=os.getenv("AZURE_OPENAI_API_KEY"),
     endpoint="https://my-endpoint.openai.azure.com",
     deployment_id="gpt-4o-mini",

@@ -37,5 +37,5 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
ChatAnthropic example app:

@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatAnthropic.html
 load_dotenv()
-chat_model = ChatAnthropic(
+chat_client = ChatAnthropic(
     api_key=os.environ.get("ANTHROPIC_API_KEY"),
     model="claude-3-7-sonnet-latest",
     system_prompt="You are a helpful assistant.",

@@ -37,5 +37,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
ChatGoogle example app:

@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatGoogle.html
 load_dotenv()
-chat_model = ChatGoogle(
+chat_client = ChatGoogle(
     api_key=os.environ.get("GOOGLE_API_KEY"),
     system_prompt="You are a helpful assistant.",
     model="gemini-2.0-flash",

@@ -33,5 +33,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
ChatOpenAI example app:

@@ -15,7 +15,7 @@
 # app, or set them in a file named `.env`. The `python-dotenv` package will load `.env`
 # as environment variables which can later be read by `os.getenv()`.
 load_dotenv()
-chat_model = ChatOpenAI(
+chat_client = ChatOpenAI(
     api_key=os.environ.get("OPENAI_API_KEY"),
     model="gpt-4o",
 )

@@ -38,5 +38,5 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
ChatOllama example app:

@@ -9,7 +9,7 @@
 # ChatOllama() requires an Ollama model server to be running locally.
 # See the docs for more information on how to set up a local Ollama server.
 # https://posit-dev.github.io/chatlas/reference/ChatOllama.html
-chat_model = ChatOllama(model="llama3.2")
+chat_client = ChatOllama(model="llama3.2")

 # Set some Shiny page options
 ui.page_opts(

@@ -29,5 +29,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
ChatOpenAI example app (with system prompt):

@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatOpenAI.html
 load_dotenv()
-chat_model = ChatOpenAI(
+chat_client = ChatOpenAI(
     api_key=os.environ.get("OPENAI_API_KEY"),
     model="gpt-4o",
     system_prompt="You are a helpful assistant.",

@@ -37,5 +37,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
Example app with a model switcher:

@@ -63,15 +63,15 @@ def get_model():
     }

     if input.model() in models["openai"]:
-        chat_model = ctl.ChatOpenAI(**model_params)
+        chat_client = ctl.ChatOpenAI(**model_params)
     elif input.model() in models["claude"]:
-        chat_model = ctl.ChatAnthropic(**model_params)
+        chat_client = ctl.ChatAnthropic(**model_params)
     elif input.model() in models["google"]:
-        chat_model = ctl.ChatGoogle(**model_params)
+        chat_client = ctl.ChatGoogle(**model_params)
     else:
         raise ValueError(f"Invalid model: {input.model()}")

-    return chat_model
+    return chat_client


 @reactive.calc
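For orientation, a hedged sketch of how the renamed calc might be consumed; the `chat` object and the handler are assumptions borrowed from the example apps above, not part of this diff:

    # a minimal sketch, assuming `chat` is the app's ui.Chat instance
    @chat.on_user_submit
    async def handle_user_input(user_input: str):
        chat_client = get_model()  # re-resolves when input.model() changes
        response = await chat_client.stream_async(user_input)
        await chat.append_message_stream(response)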
Chat class internals:

@@ -158,6 +158,8 @@ async def _task():

         _task()

+        self._latest_stream.set(_task)
+
         # Since the task runs in the background (outside/beyond the current context,
         # if any), we need to manually raise any exceptions that occur
         @reactive.effect

@@ -208,7 +210,7 @@ def get_latest_stream_result(self) -> Union[str, None]:
             "The `.get_latest_stream_result()` method is deprecated and will be removed "
             "in a future release. Use `.latest_stream.result()` instead. "
         )
-        self.latest_stream.result()
+        return self.latest_stream.result()

     async def clear(self):
         """
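The second hunk fixes a dropped `return`: the deprecated shim warned and then implicitly returned `None`. A hedged sketch of the user-facing difference, assuming `chat` is a `ui.Chat` instance as in the example apps above:

    # Before this fix: warns, computes the result, then discards it (returns None).
    # After this fix: warns and forwards to the new accessor.
    result = chat.get_latest_stream_result()

    # Preferred, non-deprecated spelling, per the warning text:
    result = chat.latest_stream.result()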
New test app under tests/playwright/shiny/components/MarkdownStream/stream-result:

@@ -0,0 +1,28 @@
+import asyncio
+
+from shiny import reactive
+from shiny.express import input, render, ui
+
+stream = ui.MarkdownStream("stream_id")
+stream.ui()
+
+
+ui.input_action_button("do_stream", "Do stream")
+
+
+async def gen():
+    yield "Hello "
+    await asyncio.sleep(0.1)
+    yield "world!"
+
+
+@reactive.effect
+@reactive.event(input.do_stream)
+async def _():
+    await stream.stream(gen())
+
+
+@render.code
+def stream_result():
+    res = stream.latest_stream.result()
+    return f"Stream result: {res}"
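Since this app lives under the Playwright test tree, a companion test presumably drives it. A minimal sketch using plain Playwright; the `app_url` fixture and the fixed wait are assumptions, not part of this diff:

    from playwright.sync_api import Page

    def test_stream_result(page: Page, app_url: str):  # app_url: hypothetical fixture
        page.goto(app_url)
        page.get_by_text("Do stream").click()
        # the generator sleeps 0.1 s between chunks; give the stream time to finish
        page.wait_for_timeout(500)
        assert "Stream result: Hello world!" in page.content()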