Commit a897b4a: Merge branch 'main' into chore/support-for-narwhals-binary-dtype
2 parents: 4ed254e + f7b7fb4

12 files changed: +80 −25 lines

shiny/bookmark/_bookmark.py

Lines changed: 13 additions & 6 deletions
@@ -3,7 +3,7 @@
 import warnings
 from abc import ABC, abstractmethod
 from pathlib import Path
-from typing import TYPE_CHECKING, Awaitable, Callable, Literal
+from typing import TYPE_CHECKING, Awaitable, Callable, Literal, Optional

 from .._docstring import add_example
 from .._utils import AsyncCallbacks, CancelCallback, wrap_async
@@ -167,7 +167,7 @@ def on_restored(
     @abstractmethod
     async def update_query_string(
         self,
-        query_string: str,
+        query_string: Optional[str] = None,
         mode: Literal["replace", "push"] = "replace",
     ) -> None:
         """
@@ -176,7 +176,7 @@ async def update_query_string(
         Parameters
         ----------
         query_string
-            The query string to set.
+            The query string to set. If `None`, the current bookmark state URL will be used.
         mode
             Whether to replace the current URL or push a new one. Pushing a new value
             will add to the user's browser history.
@@ -448,9 +448,12 @@ async def invoke_on_restored_callbacks():

     async def update_query_string(
         self,
-        query_string: str,
+        query_string: Optional[str] = None,
         mode: Literal["replace", "push"] = "replace",
     ) -> None:
+        if query_string is None:
+            query_string = await self.get_bookmark_url()
+
         if mode not in {"replace", "push"}:
             raise ValueError(f"Invalid mode: {mode}")
         await self._root_session._send_message(
@@ -723,7 +726,9 @@ def _restore_context(self) -> RestoreContext | None:
         return self._root_bookmark._restore_context

     async def update_query_string(
-        self, query_string: str, mode: Literal["replace", "push"] = "replace"
+        self,
+        query_string: Optional[str] = None,
+        mode: Literal["replace", "push"] = "replace",
     ) -> None:
         await self._root_bookmark.update_query_string(query_string, mode)

@@ -769,7 +774,9 @@ def on_bookmarked(
         return lambda: None

     async def update_query_string(
-        self, query_string: str, mode: Literal["replace", "push"] = "replace"
+        self,
+        query_string: Optional[str] = None,
+        mode: Literal["replace", "push"] = "replace",
     ) -> None:
         # no-op within ExpressStub
         return None
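
With this change, update_query_string() may be called with no argument; when query_string is None it falls back to await self.get_bookmark_url(). A minimal sketch of an app relying on the new default (the scaffolding below is illustrative only, not part of this commit):

from shiny import App, ui

app_ui = ui.page_fluid(
    ui.input_text("msg", "Message"),
    ui.input_bookmark_button(),
)

def server(input, output, session):
    @session.bookmark.on_bookmarked
    async def _(url: str):
        # Before this commit the URL had to be passed explicitly:
        #   await session.bookmark.update_query_string(url)
        # Now omitting it uses the current bookmark state URL.
        await session.bookmark.update_query_string()

app = App(app_ui, server, bookmark_store="url")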

shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/app.py

Lines changed: 2 additions & 2 deletions
@@ -13,7 +13,7 @@
 # them in a file named `.env`. The `python-dotenv` package will load `.env` as
 # environment variables which can be read by `os.getenv()`.
 load_dotenv()
-chat_model = ChatBedrockAnthropic(
+chat_client = ChatBedrockAnthropic(
     model="anthropic.claude-3-sonnet-20240229-v1:0",
 )

@@ -32,5 +32,5 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
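
The same chat_model → chat_client rename recurs verbatim in each of the chat templates that follow; the new name arguably better reflects that the chatlas object is a client for a hosted model rather than the model itself. Only the variable name changes, and behavior is identical.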

shiny/templates/chat/llm-enterprise/azure-openai/app.py

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatAzureOpenAI.html
 load_dotenv()
-chat_model = ChatAzureOpenAI(
+chat_client = ChatAzureOpenAI(
     api_key=os.getenv("AZURE_OPENAI_API_KEY"),
     endpoint="https://my-endpoint.openai.azure.com",
     deployment_id="gpt-4o-mini",
@@ -37,5 +37,5 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)

shiny/templates/chat/llms/anthropic/app.py

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatAnthropic.html
 load_dotenv()
-chat_model = ChatAnthropic(
+chat_client = ChatAnthropic(
     api_key=os.environ.get("ANTHROPIC_API_KEY"),
     model="claude-3-7-sonnet-latest",
     system_prompt="You are a helpful assistant.",
@@ -37,5 +37,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)

shiny/templates/chat/llms/google/app.py

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatGoogle.html
 load_dotenv()
-chat_model = ChatGoogle(
+chat_client = ChatGoogle(
     api_key=os.environ.get("GOOGLE_API_KEY"),
     system_prompt="You are a helpful assistant.",
     model="gemini-2.0-flash",
@@ -33,5 +33,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)

shiny/templates/chat/llms/langchain/app.py

Lines changed: 2 additions & 2 deletions
@@ -15,7 +15,7 @@
 # app, or set them in a file named `.env`. The `python-dotenv` package will load `.env`
 # as environment variables which can later be read by `os.getenv()`.
 load_dotenv()
-chat_model = ChatOpenAI(
+chat_client = ChatOpenAI(
     api_key=os.environ.get("OPENAI_API_KEY"),
     model="gpt-4o",
 )
@@ -38,5 +38,5 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)

shiny/templates/chat/llms/ollama/app.py

Lines changed: 2 additions & 2 deletions
@@ -9,7 +9,7 @@
 # ChatOllama() requires an Ollama model server to be running locally.
 # See the docs for more information on how to set up a local Ollama server.
 # https://posit-dev.github.io/chatlas/reference/ChatOllama.html
-chat_model = ChatOllama(model="llama3.2")
+chat_client = ChatOllama(model="llama3.2")

 # Set some Shiny page options
 ui.page_opts(
@@ -29,5 +29,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)

shiny/templates/chat/llms/openai/app.py

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatOpenAI.html
 load_dotenv()
-chat_model = ChatOpenAI(
+chat_client = ChatOpenAI(
     api_key=os.environ.get("OPENAI_API_KEY"),
     model="gpt-4o",
     system_prompt="You are a helpful assistant.",
@@ -37,5 +37,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)

shiny/templates/chat/llms/playground/app.py

Lines changed: 4 additions & 4 deletions
@@ -63,15 +63,15 @@ def get_model():
     }

     if input.model() in models["openai"]:
-        chat_model = ctl.ChatOpenAI(**model_params)
+        chat_client = ctl.ChatOpenAI(**model_params)
     elif input.model() in models["claude"]:
-        chat_model = ctl.ChatAnthropic(**model_params)
+        chat_client = ctl.ChatAnthropic(**model_params)
     elif input.model() in models["google"]:
-        chat_model = ctl.ChatGoogle(**model_params)
+        chat_client = ctl.ChatGoogle(**model_params)
     else:
         raise ValueError(f"Invalid model: {input.model()}")

-    return chat_model
+    return chat_client


 @reactive.calc

shiny/ui/_markdown_stream.py

Lines changed: 3 additions & 1 deletion
@@ -158,6 +158,8 @@ async def _task():

         _task()

+        self._latest_stream.set(_task)
+
         # Since the task runs in the background (outside/beyond the current context,
         # if any), we need to manually raise any exceptions that occur
         @reactive.effect
@@ -208,7 +210,7 @@ def get_latest_stream_result(self) -> Union[str, None]:
             "The `.get_latest_stream_result()` method is deprecated and will be removed "
            "in a future release. Use `.latest_stream.result()` instead. "
         )
-        self.latest_stream.result()
+        return self.latest_stream.result()

     async def clear(self):
         """
