diff --git a/shiny/_main_create.py b/shiny/_main_create.py index 8704d50b6..ee1e5aaf3 100644 --- a/shiny/_main_create.py +++ b/shiny/_main_create.py @@ -223,10 +223,6 @@ def apps(self) -> list[ShinyTemplate]: def packages(self) -> list[ShinyTemplate]: return self._templates("templates/package") - @property - def chat_starters(self) -> list[ShinyTemplate]: - return self._templates("templates/chat/starters") - @property def chat_llms(self) -> list[ShinyTemplate]: return self._templates("templates/chat/llms") @@ -235,6 +231,14 @@ def chat_llms(self) -> list[ShinyTemplate]: def chat_enterprise(self) -> list[ShinyTemplate]: return self._templates("templates/chat/llm-enterprise") + @property + def stream_llms(self) -> list[ShinyTemplate]: + return self._templates("templates/markdown-stream/llms") + + @property + def stream_enterprise(self) -> list[ShinyTemplate]: + return self._templates("templates/markdown-stream/llm-enterprise") + shiny_internal_templates = ShinyInternalTemplates() @@ -261,15 +265,16 @@ def use_internal_template( app_templates = shiny_internal_templates.apps pkg_templates = shiny_internal_templates.packages - chat_templates = [ - *shiny_internal_templates.chat_starters, + gen_ai_templates = [ *shiny_internal_templates.chat_llms, *shiny_internal_templates.chat_enterprise, + *shiny_internal_templates.stream_llms, + *shiny_internal_templates.stream_enterprise, ] menu_choices = [ + Choice(title="Generative AI...", value="_gen-ai"), Choice(title="Custom JavaScript component...", value="_js-component"), - Choice(title="Chat component templates...", value="_chat"), Choice( title="Choose from the Shiny Templates website", value="_external-gallery" ), @@ -279,7 +284,7 @@ def use_internal_template( question_state = question_choose_template(app_templates, *menu_choices) template = template_by_name( - [*app_templates, *pkg_templates, *chat_templates], question_state + [*app_templates, *pkg_templates, *gen_ai_templates], question_state ) if template is not None: @@ 
-302,8 +307,8 @@ def use_internal_template( sys.exit(0) elif question_state == "_js-component": use_internal_package_template(dest_dir=dest_dir, package_name=package_name) - elif question_state == "_chat": - use_internal_chat_ai_template(dest_dir=dest_dir, package_name=package_name) + elif question_state == "_gen-ai": + use_internal_gen_ai_template(dest_dir=dest_dir, package_name=package_name) else: valid_choices = [t.id for t in app_templates + pkg_templates] if question_state not in valid_choices: @@ -345,18 +350,24 @@ def use_internal_package_template( package_template_questions(template, dest_dir=dest_dir, package_name=package_name) -def use_internal_chat_ai_template( +def use_internal_gen_ai_template( input: str | None = None, dest_dir: Optional[Path] = None, package_name: Optional[str] = None, ): if input is None: input = questionary.select( - "Which kind of chat template would you like?", + "Which kind of Gen AI template would you like?", choices=[ - Choice(title="Chat starters...", value="_chat-starters"), - Choice(title="LLM powered chat...", value="_chat-llms"), - Choice(title="Enterprise LLM...", value="_chat-llm_enterprise"), + Choice(title="Chat with LLM...", value="_chat-llms"), + Choice( + title="Chat with enterprise LLM...", value="_chat-llm_enterprise" + ), + Choice(title="Stream markdown with LLM...", value="_stream-llms"), + Choice( + title="Stream markdown with enterprise LLM...", + value="_stream-enterprise", + ), back_choice, cancel_choice, ], @@ -370,29 +381,34 @@ def use_internal_chat_ai_template( use_internal_template(dest_dir=dest_dir, package_name=package_name) return - use_internal_chat_ai_template( + use_internal_gen_ai_template( input, dest_dir=dest_dir, package_name=package_name ) return - if input == "_chat-starters": - template_choices = shiny_internal_templates.chat_starters - elif input == "_chat-llms": + if input == "_chat-llms": template_choices = shiny_internal_templates.chat_llms - else: + elif input == "_chat-llm_enterprise": 
template_choices = shiny_internal_templates.chat_enterprise + elif input == "_stream-llms": + template_choices = shiny_internal_templates.stream_llms + elif input == "_stream-enterprise": + template_choices = shiny_internal_templates.stream_enterprise + else: + raise ValueError(f"Invalid Gen AI template choice: {input}") choice = question_choose_template(template_choices, back_choice) if choice == "back": - use_internal_chat_ai_template(dest_dir=dest_dir, package_name=package_name) + use_internal_gen_ai_template(dest_dir=dest_dir, package_name=package_name) return template = template_by_name( [ - *shiny_internal_templates.chat_starters, *shiny_internal_templates.chat_llms, *shiny_internal_templates.chat_enterprise, + *shiny_internal_templates.stream_llms, + *shiny_internal_templates.stream_enterprise, ], choice, ) diff --git a/shiny/templates/chat/starters/hello/_template.json b/shiny/api-examples/Chat/_template.json similarity index 100% rename from shiny/templates/chat/starters/hello/_template.json rename to shiny/api-examples/Chat/_template.json diff --git a/shiny/templates/chat/starters/hello/app-core.py b/shiny/api-examples/Chat/app-core.py similarity index 77% rename from shiny/templates/chat/starters/hello/app-core.py rename to shiny/api-examples/Chat/app-core.py index 8628b85df..4c4dc79c5 100644 --- a/shiny/templates/chat/starters/hello/app-core.py +++ b/shiny/api-examples/Chat/app-core.py @@ -9,8 +9,10 @@ # Create a welcome message welcome = """ Hi! This is a simple Shiny `Chat` UI. Enter a message below and I will -simply repeat it back to you. For more examples, see this -[folder of examples](https://github.com/posit-dev/py-shiny/tree/main/shiny/templates/chat). +simply repeat it back to you. + +To learn more about chatbots and how to build them with Shiny, check out +[the documentation](https://shiny.posit.co/py/docs/genai-chatbots.html). 
""" diff --git a/shiny/templates/chat/starters/hello/app-express.py b/shiny/api-examples/Chat/app-express.py similarity index 77% rename from shiny/templates/chat/starters/hello/app-express.py rename to shiny/api-examples/Chat/app-express.py index e7f8cfc2c..f268d0b5a 100644 --- a/shiny/templates/chat/starters/hello/app-express.py +++ b/shiny/api-examples/Chat/app-express.py @@ -10,8 +10,10 @@ # Create a welcome message welcome = """ Hi! This is a simple Shiny `Chat` UI. Enter a message below and I will -simply repeat it back to you. For more examples, see this -[folder of examples](https://github.com/posit-dev/py-shiny/tree/main/shiny/templates/chat). +simply repeat it back to you. + +To learn more about chatbots and how to build them with Shiny, check out +[the documentation](https://shiny.posit.co/py/docs/genai-chatbots.html). """ # Create a chat instance diff --git a/shiny/templates/chat/starters/hello/requirements.txt b/shiny/api-examples/Chat/requirements.txt similarity index 100% rename from shiny/templates/chat/starters/hello/requirements.txt rename to shiny/api-examples/Chat/requirements.txt diff --git a/shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/_template.json b/shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/_template.json index eb6e9ff7b..a023da9f0 100644 --- a/shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/_template.json +++ b/shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/_template.json @@ -14,6 +14,14 @@ { "type": "action", "text": "Learn more at https://posit-dev.github.io/chatlas/reference/ChatBedrockAnthropic.html" + }, + { + "type": "info", + "text": "Want to learn more about AI chatbots?" 
+ }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-chatbots.html" } ] } diff --git a/shiny/templates/chat/llm-enterprise/azure-openai/_template.json b/shiny/templates/chat/llm-enterprise/azure-openai/_template.json index 9bd24ede2..edf2aa343 100644 --- a/shiny/templates/chat/llm-enterprise/azure-openai/_template.json +++ b/shiny/templates/chat/llm-enterprise/azure-openai/_template.json @@ -14,6 +14,14 @@ { "type": "action", "text": "Learn more at https://posit-dev.github.io/chatlas/reference/ChatAzureOpenAI.html" + }, + { + "type": "info", + "text": "Want to learn more about AI chatbots?" + }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-chatbots.html" } ] } diff --git a/shiny/templates/chat/llms/anthropic/_template.json b/shiny/templates/chat/llms/anthropic/_template.json index c727ce455..34c92fb19 100644 --- a/shiny/templates/chat/llms/anthropic/_template.json +++ b/shiny/templates/chat/llms/anthropic/_template.json @@ -14,6 +14,14 @@ { "type": "action", "text": "Learn how to obtain one at https://posit-dev.github.io/chatlas/reference/ChatAnthropic.html" + }, + { + "type": "info", + "text": "Want to learn more about AI chatbots?" + }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-chatbots.html" } ] } diff --git a/shiny/templates/chat/llms/google/_template.json b/shiny/templates/chat/llms/google/_template.json index 8ee55e079..455f9c902 100644 --- a/shiny/templates/chat/llms/google/_template.json +++ b/shiny/templates/chat/llms/google/_template.json @@ -14,6 +14,14 @@ { "type": "action", "text": "Learn how to obtain one at https://posit-dev.github.io/chatlas/reference/ChatGoogle.html" + }, + { + "type": "info", + "text": "Want to learn more about AI chatbots?" 
+ }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-chatbots.html" } ] } diff --git a/shiny/templates/chat/llms/langchain/_template.json b/shiny/templates/chat/llms/langchain/_template.json index b7731fd68..a4940906b 100644 --- a/shiny/templates/chat/llms/langchain/_template.json +++ b/shiny/templates/chat/llms/langchain/_template.json @@ -5,5 +5,15 @@ "next_steps": [ "Put your OpenAI API key in the `template.env` file and rename it to `.env`.", "Run the app with `shiny run app.py`." + ], + "follow_up": [ + { + "type": "info", + "text": "Want to learn more about AI chatbots?" + }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-chatbots.html" + } ] } diff --git a/shiny/templates/chat/llms/langchain/app.py b/shiny/templates/chat/llms/langchain/app.py index 13a596762..8ecc5c0ef 100644 --- a/shiny/templates/chat/llms/langchain/app.py +++ b/shiny/templates/chat/llms/langchain/app.py @@ -38,5 +38,10 @@ # Define a callback to run when the user submits a message @chat.on_user_submit async def handle_user_input(user_input: str): - response = await chat_client.stream_async(user_input) - await chat.append_message_stream(response) + response = chat_client.astream(user_input) + + async def stream_wrapper(): + async for item in response: + yield item.content + + await chat.append_message_stream(stream_wrapper()) diff --git a/shiny/templates/chat/llms/ollama/_template.json b/shiny/templates/chat/llms/ollama/_template.json index fb0fb217a..9cefc8632 100644 --- a/shiny/templates/chat/llms/ollama/_template.json +++ b/shiny/templates/chat/llms/ollama/_template.json @@ -15,6 +15,14 @@ { "type": "action", "text": "Learn more at https://posit-dev.github.io/chatlas/reference/ChatOllama.html" + }, + { + "type": "info", + "text": "Want to learn more about AI chatbots?" 
+ }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-chatbots.html" } ] } diff --git a/shiny/templates/chat/llms/openai/_template.json b/shiny/templates/chat/llms/openai/_template.json index 4fcf812e8..83941f981 100644 --- a/shiny/templates/chat/llms/openai/_template.json +++ b/shiny/templates/chat/llms/openai/_template.json @@ -14,6 +14,14 @@ { "type": "action", "text": "Learn how to obtain one at https://posit-dev.github.io/chatlas/reference/ChatOpenAI.html" + }, + { + "type": "info", + "text": "Want to learn more about AI chatbots?" + }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-chatbots.html" } ] } diff --git a/shiny/templates/chat/llms/playground/_template.json b/shiny/templates/chat/llms/playground/_template.json index 98dbfe728..a0ecb4fad 100644 --- a/shiny/templates/chat/llms/playground/_template.json +++ b/shiny/templates/chat/llms/playground/_template.json @@ -14,6 +14,14 @@ { "type": "action", "text": "Learn how to obtain them at https://posit-dev.github.io/chatlas/reference/" + }, + { + "type": "info", + "text": "Want to learn more about AI chatbots?" + }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-chatbots.html" } ] } diff --git a/shiny/templates/chat/starters/sidebar-dark/_template.json b/shiny/templates/chat/starters/sidebar-dark/_template.json deleted file mode 100644 index 78dd6d22d..000000000 --- a/shiny/templates/chat/starters/sidebar-dark/_template.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "type": "app", - "id": "chat-sidebar-dark", - "title": "Chat in a sidebar with dark mode", - "next_steps": [ - "Run the app with `shiny run app.py`." 
- ] -} diff --git a/shiny/templates/chat/starters/sidebar-dark/app.py b/shiny/templates/chat/starters/sidebar-dark/app.py deleted file mode 100644 index 57b29571c..000000000 --- a/shiny/templates/chat/starters/sidebar-dark/app.py +++ /dev/null @@ -1,30 +0,0 @@ -# -------------------------------------------------------------------------------- -# This example demonstrates Shiny Chat's dark mode capability. -# -------------------------------------------------------------------------------- - -from shiny.express import ui - -# Page options with a dark mode toggle -ui.page_opts( - title=ui.tags.div( - "Hello Dark mode", - ui.input_dark_mode(mode="dark"), - class_="d-flex justify-content-between w-100", - ), - fillable=True, - fillable_mobile=True, -) - -# An empty, closed, sidebar -with ui.sidebar(width=300, style="height:100%", position="right"): - chat = ui.Chat(id="chat", messages=["Welcome to the dark side!"]) - chat.ui(height="100%") - - -# Define a callback to run when the user submits a message -@chat.on_user_submit -async def handle_user_input(user_input: str): - await chat.append_message_stream(f"You said: {user_input}") - - -"Lorem ipsum dolor sit amet, consectetur adipiscing elit" diff --git a/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/_template.json b/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/_template.json new file mode 100644 index 000000000..6461f1770 --- /dev/null +++ b/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/_template.json @@ -0,0 +1,27 @@ +{ + "type": "app", + "id": "stream-ai-anthropic-aws", + "title": "Stream from Anthropic via AWS Bedrock", + "next_steps": [ + "Put your Bedrock credentials in the `template.env` file and rename it to `.env`.", + "Run the app with `shiny run app.py`." + ], + "follow_up": [ + { + "type": "info", + "text": "Need help connecting to Bedrock?" 
+ }, + { + "type": "action", + "text": "Visit https://posit-dev.github.io/chatlas/reference/ChatBedrockAnthropic.html" + }, + { + "type": "info", + "text": "Want to learn more about streaming content?" + }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-stream.html" + } + ] +} diff --git a/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/app.py b/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/app.py new file mode 100644 index 000000000..52989f50a --- /dev/null +++ b/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/app.py @@ -0,0 +1,43 @@ +# ------------------------------------------------------------------------------------ +# A basic Shiny MarkdownStream powered by Anthropic's Claude model with Bedrock. +# To run it, you'll need an AWS Bedrock configuration. +# To get started, follow the instructions at https://aws.amazon.com/bedrock/claude/ +# as well as https://github.com/anthropics/anthropic-sdk-python#aws-bedrock +# ------------------------------------------------------------------------------------ +from app_utils import load_dotenv +from chatlas import ChatBedrockAnthropic + +from shiny import reactive +from shiny.express import input, ui + +# Either explicitly set the AWS environment variables before launching the app, or set +# them in a file named `.env`. The `python-dotenv` package will load `.env` as +# environment variables which can be read by `os.getenv()`. 
+load_dotenv() +chat_client = ChatBedrockAnthropic( + model="anthropic.claude-3-sonnet-20240229-v1:0", +) + +# Some sidebar input controls to populate a prompt and trigger the stream +with ui.sidebar(): + ui.input_select( + "comic", + "Choose a comedian", + choices=["Jerry Seinfeld", "Ali Wong", "Mitch Hedberg"], + ) + ui.input_action_button("go", "Tell me a joke", class_="btn-primary") + +# Create and display a MarkdownStream() +stream = ui.MarkdownStream(id="my_stream") +stream.ui( + content="Press the button and I'll tell you a joke.", +) + + +# Clicking the button triggers the streaming joke generation +@reactive.effect +@reactive.event(input.go) +async def do_joke(): + prompt = f"Pretend you are {input.comic()} and tell me a funny joke." + response = await chat_client.stream_async(prompt) + await stream.stream(response) diff --git a/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/app_utils.py b/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/app_utils.py new file mode 100644 index 000000000..404a13730 --- /dev/null +++ b/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/app_utils.py @@ -0,0 +1,26 @@ +import os +from pathlib import Path +from typing import Any + +app_dir = Path(__file__).parent +env_file = app_dir / ".env" + + +def load_dotenv(dotenv_path: os.PathLike[str] = env_file, **kwargs: Any) -> None: + """ + A convenience wrapper around `dotenv.load_dotenv` that warns if `dotenv` is not installed. + It also returns `None` to make it easier to ignore the return value. + """ + try: + import dotenv + + dotenv.load_dotenv(dotenv_path=dotenv_path, **kwargs) + except ImportError: + import warnings + + warnings.warn( + "Could not import `dotenv`. 
If you want to use `.env` files to " + "load environment variables, please install it using " + "`pip install python-dotenv`.", + stacklevel=2, + ) diff --git a/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/requirements.txt b/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/requirements.txt new file mode 100644 index 000000000..8abd3d834 --- /dev/null +++ b/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/requirements.txt @@ -0,0 +1,3 @@ +shiny +dotenv +chatlas[anthropic-bedrock] diff --git a/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/template.env b/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/template.env new file mode 100644 index 000000000..fc4cc092b --- /dev/null +++ b/shiny/templates/markdown-stream/llm-enterprise/aws-bedrock-anthropic/template.env @@ -0,0 +1,6 @@ +# Once you provided your API key, rename this file to .env +# The load_dotenv() in the app.py will then load this env variable +AWS_SECRET_KEY= +AWS_ACCESS_KEY= +AWS_REGION= +AWS_ACCOUNT_ID= diff --git a/shiny/templates/markdown-stream/llm-enterprise/azure-openai/_template.json b/shiny/templates/markdown-stream/llm-enterprise/azure-openai/_template.json new file mode 100644 index 000000000..9fbf00023 --- /dev/null +++ b/shiny/templates/markdown-stream/llm-enterprise/azure-openai/_template.json @@ -0,0 +1,27 @@ +{ + "type": "app", + "id": "stream-ai-azure-openai", + "title": "Stream from OpenAI via Azure", + "next_steps": [ + "Put your Azure credentials in the `template.env` file and rename it to `.env`.", + "Run the app with `shiny run app.py`." + ], + "follow_up": [ + { + "type": "info", + "text": "Need help connecting to Azure?" + }, + { + "type": "action", + "text": "Visit https://posit-dev.github.io/chatlas/reference/ChatAzureOpenAI.html" + }, + { + "type": "info", + "text": "Want to learn more about streaming content?" 
+ }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-stream.html" + } + ] +} diff --git a/shiny/templates/markdown-stream/llm-enterprise/azure-openai/app.py b/shiny/templates/markdown-stream/llm-enterprise/azure-openai/app.py new file mode 100644 index 000000000..d9631c13d --- /dev/null +++ b/shiny/templates/markdown-stream/llm-enterprise/azure-openai/app.py @@ -0,0 +1,43 @@ +# ------------------------------------------------------------------------------------ +# A basic Shiny MarkdownStream example powered by OpenAI running on Azure. +# ------------------------------------------------------------------------------------ +from app_utils import load_dotenv +from chatlas import ChatAzureOpenAI + +from shiny import reactive +from shiny.express import input, ui + +# ChatAzureOpenAI() requires an API key from Azure OpenAI. +# See the docs for more information on how to obtain one. +# https://posit-dev.github.io/chatlas/reference/ChatAzureOpenAI.html +load_dotenv() +chat_client = ChatAzureOpenAI( + endpoint="https://my-endpoint.openai.azure.com", + deployment_id="gpt-4o-mini", + api_version="2024-08-01-preview", +) + + +# Some sidebar input controls to populate a prompt and trigger the stream +with ui.sidebar(): + ui.input_select( + "comic", + "Choose a comedian", + choices=["Jerry Seinfeld", "Ali Wong", "Mitch Hedberg"], + ) + ui.input_action_button("go", "Tell me a joke", class_="btn-primary") + +# Create and display a MarkdownStream() +stream = ui.MarkdownStream(id="my_stream") +stream.ui( + content="Press the button and I'll tell you a joke.", +) + + +# Clicking the button triggers the streaming joke generation +@reactive.effect +@reactive.event(input.go) +async def do_joke(): + prompt = f"Pretend you are {input.comic()} and tell me a funny joke." 
+ response = await chat_client.stream_async(prompt) + await stream.stream(response) diff --git a/shiny/templates/markdown-stream/llm-enterprise/azure-openai/app_utils.py b/shiny/templates/markdown-stream/llm-enterprise/azure-openai/app_utils.py new file mode 100644 index 000000000..404a13730 --- /dev/null +++ b/shiny/templates/markdown-stream/llm-enterprise/azure-openai/app_utils.py @@ -0,0 +1,26 @@ +import os +from pathlib import Path +from typing import Any + +app_dir = Path(__file__).parent +env_file = app_dir / ".env" + + +def load_dotenv(dotenv_path: os.PathLike[str] = env_file, **kwargs: Any) -> None: + """ + A convenience wrapper around `dotenv.load_dotenv` that warns if `dotenv` is not installed. + It also returns `None` to make it easier to ignore the return value. + """ + try: + import dotenv + + dotenv.load_dotenv(dotenv_path=dotenv_path, **kwargs) + except ImportError: + import warnings + + warnings.warn( + "Could not import `dotenv`. If you want to use `.env` files to " + "load environment variables, please install it using " + "`pip install python-dotenv`.", + stacklevel=2, + ) diff --git a/shiny/templates/markdown-stream/llm-enterprise/azure-openai/requirements.txt b/shiny/templates/markdown-stream/llm-enterprise/azure-openai/requirements.txt new file mode 100644 index 000000000..e7f23a5c4 --- /dev/null +++ b/shiny/templates/markdown-stream/llm-enterprise/azure-openai/requirements.txt @@ -0,0 +1,3 @@ +shiny +dotenv +chatlas[openai] diff --git a/shiny/templates/markdown-stream/llm-enterprise/azure-openai/template.env b/shiny/templates/markdown-stream/llm-enterprise/azure-openai/template.env new file mode 100644 index 000000000..017e4200e --- /dev/null +++ b/shiny/templates/markdown-stream/llm-enterprise/azure-openai/template.env @@ -0,0 +1,3 @@ +# Once you provided your API key, rename this file to .env +# The load_dotenv() in the app.py will then load this env variable +AZURE_OPENAI_API_KEY= diff --git 
a/shiny/templates/markdown-stream/llms/anthropic/_template.json b/shiny/templates/markdown-stream/llms/anthropic/_template.json new file mode 100644 index 000000000..1ecfc1af4 --- /dev/null +++ b/shiny/templates/markdown-stream/llms/anthropic/_template.json @@ -0,0 +1,27 @@ +{ + "type": "app", + "id": "stream-ai-anthropic", + "title": "Stream from Anthropic", + "next_steps": [ + "Put your Anthropic API key in the `template.env` file and rename it to `.env`.", + "Run the app with `shiny run app.py`." + ], + "follow_up": [ + { + "type": "info", + "text": "Need help obtaining an API key?" + }, + { + "type": "action", + "text": "Learn how to obtain one at https://posit-dev.github.io/chatlas/reference/ChatAnthropic.html" + }, + { + "type": "info", + "text": "Want to learn more about streaming content?" + }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-stream.html" + } + ] +} diff --git a/shiny/templates/markdown-stream/llms/anthropic/app.py b/shiny/templates/markdown-stream/llms/anthropic/app.py new file mode 100644 index 000000000..c5bf55cf4 --- /dev/null +++ b/shiny/templates/markdown-stream/llms/anthropic/app.py @@ -0,0 +1,38 @@ +# ------------------------------------------------------------------------------------ +# A basic Shiny MarkdownStream example powered by Anthropic. +# ------------------------------------------------------------------------------------ +from app_utils import load_dotenv +from chatlas import ChatAnthropic + +from shiny import reactive +from shiny.express import input, ui + +# ChatAnthropic() requires an API key from Anthropic. +# See the docs for more information on how to obtain one. 
+# https://posit-dev.github.io/chatlas/reference/ChatAnthropic.html +load_dotenv() +chat_client = ChatAnthropic() + +# Some sidebar input controls to populate a prompt and trigger the stream +with ui.sidebar(): + ui.input_select( + "comic", + "Choose a comedian", + choices=["Jerry Seinfeld", "Ali Wong", "Mitch Hedberg"], + ) + ui.input_action_button("go", "Tell me a joke", class_="btn-primary") + +# Create and display a MarkdownStream() +stream = ui.MarkdownStream(id="my_stream") +stream.ui( + content="Press the button and I'll tell you a joke.", +) + + +# Clicking the button triggers the streaming joke generation +@reactive.effect +@reactive.event(input.go) +async def do_joke(): + prompt = f"Pretend you are {input.comic()} and tell me a funny joke." + response = await chat_client.stream_async(prompt) + await stream.stream(response) diff --git a/shiny/templates/markdown-stream/llms/anthropic/app_utils.py b/shiny/templates/markdown-stream/llms/anthropic/app_utils.py new file mode 100644 index 000000000..404a13730 --- /dev/null +++ b/shiny/templates/markdown-stream/llms/anthropic/app_utils.py @@ -0,0 +1,26 @@ +import os +from pathlib import Path +from typing import Any + +app_dir = Path(__file__).parent +env_file = app_dir / ".env" + + +def load_dotenv(dotenv_path: os.PathLike[str] = env_file, **kwargs: Any) -> None: + """ + A convenience wrapper around `dotenv.load_dotenv` that warns if `dotenv` is not installed. + It also returns `None` to make it easier to ignore the return value. + """ + try: + import dotenv + + dotenv.load_dotenv(dotenv_path=dotenv_path, **kwargs) + except ImportError: + import warnings + + warnings.warn( + "Could not import `dotenv`. 
If you want to use `.env` files to " + "load environment variables, please install it using " + "`pip install python-dotenv`.", + stacklevel=2, + ) diff --git a/shiny/templates/markdown-stream/llms/anthropic/requirements.txt b/shiny/templates/markdown-stream/llms/anthropic/requirements.txt new file mode 100644 index 000000000..29322a167 --- /dev/null +++ b/shiny/templates/markdown-stream/llms/anthropic/requirements.txt @@ -0,0 +1,3 @@ +shiny +dotenv +chatlas[anthropic] diff --git a/shiny/templates/markdown-stream/llms/anthropic/template.env b/shiny/templates/markdown-stream/llms/anthropic/template.env new file mode 100644 index 000000000..6fe7cf7f7 --- /dev/null +++ b/shiny/templates/markdown-stream/llms/anthropic/template.env @@ -0,0 +1,3 @@ +# Once you provided your API key, rename this file to .env +# The load_dotenv() in the app.py will then load this env variable +ANTHROPIC_API_KEY= diff --git a/shiny/templates/markdown-stream/llms/google/_template.json b/shiny/templates/markdown-stream/llms/google/_template.json new file mode 100644 index 000000000..6c24e2ce6 --- /dev/null +++ b/shiny/templates/markdown-stream/llms/google/_template.json @@ -0,0 +1,27 @@ +{ + "type": "app", + "id": "stream-ai-gemini", + "title": "Stream from Google Gemini", + "next_steps": [ + "Put your Google API key in the `template.env` file and rename it to `.env`.", + "Run the app with `shiny run app.py`." + ], + "follow_up": [ + { + "type": "info", + "text": "Need help obtaining an API key?" + }, + { + "type": "action", + "text": "Learn how to obtain one at https://posit-dev.github.io/chatlas/reference/ChatGoogle.html" + }, + { + "type": "info", + "text": "Want to learn more about streaming content?" 
+ }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-stream.html" + } + ] +} diff --git a/shiny/templates/markdown-stream/llms/google/app.py b/shiny/templates/markdown-stream/llms/google/app.py new file mode 100644 index 000000000..6deb8ffea --- /dev/null +++ b/shiny/templates/markdown-stream/llms/google/app.py @@ -0,0 +1,38 @@ +# ------------------------------------------------------------------------------------ +# A basic Shiny MarkdownStream example powered by Google's Gemini. +# ------------------------------------------------------------------------------------ +from app_utils import load_dotenv +from chatlas import ChatGoogle + +from shiny import reactive +from shiny.express import input, ui + +# ChatGoogle() requires an API key from Google. +# See the docs for more information on how to obtain one. +# https://posit-dev.github.io/chatlas/reference/ChatGoogle.html +load_dotenv() +chat_client = ChatGoogle() + +# Some sidebar input controls to populate a prompt and trigger the stream +with ui.sidebar(): + ui.input_select( + "comic", + "Choose a comedian", + choices=["Jerry Seinfeld", "Ali Wong", "Mitch Hedberg"], + ) + ui.input_action_button("go", "Tell me a joke", class_="btn-primary") + +# Create and display a MarkdownStream() +stream = ui.MarkdownStream(id="my_stream") +stream.ui( + content="Press the button and I'll tell you a joke.", +) + + +# Clicking the button triggers the streaming joke generation +@reactive.effect +@reactive.event(input.go) +async def do_joke(): + prompt = f"Pretend you are {input.comic()} and tell me a funny joke." 
+    response = await chat_client.stream_async(prompt)
+    await stream.stream(response)
diff --git a/shiny/templates/markdown-stream/llms/google/app_utils.py b/shiny/templates/markdown-stream/llms/google/app_utils.py
new file mode 100644
index 000000000..404a13730
--- /dev/null
+++ b/shiny/templates/markdown-stream/llms/google/app_utils.py
@@ -0,0 +1,26 @@
+import os
+from pathlib import Path
+from typing import Any
+
+app_dir = Path(__file__).parent
+env_file = app_dir / ".env"
+
+
+def load_dotenv(dotenv_path: os.PathLike[str] = env_file, **kwargs: Any) -> None:
+    """
+    A convenience wrapper around `dotenv.load_dotenv` that warns if `dotenv` is not installed.
+    It also returns `None` to make it easier to ignore the return value.
+    """
+    try:
+        import dotenv
+
+        dotenv.load_dotenv(dotenv_path=dotenv_path, **kwargs)
+    except ImportError:
+        import warnings
+
+        warnings.warn(
+            "Could not import `dotenv`. If you want to use `.env` files to "
+            "load environment variables, please install it using "
+            "`pip install python-dotenv`.",
+            stacklevel=2,
+        )
diff --git a/shiny/templates/markdown-stream/llms/google/requirements.txt b/shiny/templates/markdown-stream/llms/google/requirements.txt
new file mode 100644
index 000000000..3868e89d2
--- /dev/null
+++ b/shiny/templates/markdown-stream/llms/google/requirements.txt
@@ -0,0 +1,3 @@
+shiny
+python-dotenv
+chatlas[google]
diff --git a/shiny/templates/markdown-stream/llms/google/template.env b/shiny/templates/markdown-stream/llms/google/template.env
new file mode 100644
index 000000000..b41ee2ba5
--- /dev/null
+++ b/shiny/templates/markdown-stream/llms/google/template.env
@@ -0,0 +1,3 @@
+# Once you've provided your API key, rename this file to .env
+# The load_dotenv() in the app.py will then load this env variable
+GOOGLE_API_KEY=
diff --git a/shiny/templates/markdown-stream/llms/langchain/_template.json b/shiny/templates/markdown-stream/llms/langchain/_template.json
new file mode 100644
index 000000000..0d1535bc7
--- /dev/null
+++ b/shiny/templates/markdown-stream/llms/langchain/_template.json
@@ -0,0 +1,19 @@
+{
+  "type": "app",
+  "id": "stream-ai-langchain",
+  "title": "Stream from LangChain",
+  "next_steps": [
+    "Put your OpenAI API key in the `template.env` file and rename it to `.env`.",
+    "Run the app with `shiny run app.py`."
+  ],
+  "follow_up": [
+    {
+      "type": "info",
+      "text": "Want to learn more about streaming content?"
+    },
+    {
+      "type": "action",
+      "text": "Visit https://shiny.posit.co/py/docs/genai-stream.html"
+    }
+  ]
+}
diff --git a/shiny/templates/markdown-stream/llms/langchain/app.py b/shiny/templates/markdown-stream/llms/langchain/app.py
new file mode 100644
index 000000000..18ae83bd4
--- /dev/null
+++ b/shiny/templates/markdown-stream/llms/langchain/app.py
@@ -0,0 +1,46 @@
+# ------------------------------------------------------------------------------------
+# A basic Shiny MarkdownStream example powered by OpenAI via LangChain.
+# To run it, you'll need an OpenAI API key.
+# To get one, follow the instructions at https://platform.openai.com/docs/quickstart
+# To use other providers/models via LangChain, see https://python.langchain.com/v0.1/docs/modules/model_io/chat/quick_start/
+# ------------------------------------------------------------------------------------
+from app_utils import load_dotenv
+from langchain_openai import ChatOpenAI
+
+from shiny import reactive
+from shiny.express import input, ui
+
+# Either explicitly set the OPENAI_API_KEY environment variable before launching the
+# app, or set them in a file named `.env`. The `python-dotenv` package will load `.env`
+# as environment variables which can later be read by `os.getenv()`.
+load_dotenv() +chat_client = ChatOpenAI() + +# Some sidebar input controls to populate a prompt and trigger the stream +with ui.sidebar(): + ui.input_select( + "comic", + "Choose a comedian", + choices=["Jerry Seinfeld", "Ali Wong", "Mitch Hedberg"], + ) + ui.input_action_button("go", "Tell me a joke", class_="btn-primary") + +# Create and display a MarkdownStream() +stream = ui.MarkdownStream(id="my_stream") +stream.ui( + content="Press the button and I'll tell you a joke.", +) + + +# Clicking the button triggers the streaming joke generation +@reactive.effect +@reactive.event(input.go) +async def do_joke(): + prompt = f"Pretend you are {input.comic()} and tell me a funny joke." + response = chat_client.astream(prompt) + + async def stream_wrapper(): + async for item in response: + yield item.content + + await stream.stream(stream_wrapper()) diff --git a/shiny/templates/markdown-stream/llms/langchain/app_utils.py b/shiny/templates/markdown-stream/llms/langchain/app_utils.py new file mode 100644 index 000000000..404a13730 --- /dev/null +++ b/shiny/templates/markdown-stream/llms/langchain/app_utils.py @@ -0,0 +1,26 @@ +import os +from pathlib import Path +from typing import Any + +app_dir = Path(__file__).parent +env_file = app_dir / ".env" + + +def load_dotenv(dotenv_path: os.PathLike[str] = env_file, **kwargs: Any) -> None: + """ + A convenience wrapper around `dotenv.load_dotenv` that warns if `dotenv` is not installed. + It also returns `None` to make it easier to ignore the return value. + """ + try: + import dotenv + + dotenv.load_dotenv(dotenv_path=dotenv_path, **kwargs) + except ImportError: + import warnings + + warnings.warn( + "Could not import `dotenv`. 
If you want to use `.env` files to "
+            "load environment variables, please install it using "
+            "`pip install python-dotenv`.",
+            stacklevel=2,
+        )
diff --git a/shiny/templates/markdown-stream/llms/langchain/requirements.txt b/shiny/templates/markdown-stream/llms/langchain/requirements.txt
new file mode 100644
index 000000000..bb1f1f4aa
--- /dev/null
+++ b/shiny/templates/markdown-stream/llms/langchain/requirements.txt
@@ -0,0 +1,3 @@
+shiny
+python-dotenv
+langchain-openai
diff --git a/shiny/templates/markdown-stream/llms/langchain/template.env b/shiny/templates/markdown-stream/llms/langchain/template.env
new file mode 100644
index 000000000..33b6abc3f
--- /dev/null
+++ b/shiny/templates/markdown-stream/llms/langchain/template.env
@@ -0,0 +1,3 @@
+# Once you've provided your API key, rename this file to .env
+# The load_dotenv() in the app.py will then load this env variable
+OPENAI_API_KEY=
diff --git a/shiny/templates/markdown-stream/llms/ollama/_template.json b/shiny/templates/markdown-stream/llms/ollama/_template.json
new file mode 100644
index 000000000..0d8b3eac7
--- /dev/null
+++ b/shiny/templates/markdown-stream/llms/ollama/_template.json
@@ -0,0 +1,28 @@
+{
+  "type": "app",
+  "id": "stream-ai-ollama",
+  "title": "Stream from Ollama",
+  "next_steps": [
+    "If you haven't already, download the Ollama executable from https://ollama.com/",
+    "Run the executable and download the relevant model (llama3.2)",
+    "Run the app with `shiny run app.py`."
+  ],
+  "follow_up": [
+    {
+      "type": "info",
+      "text": "Need help connecting to Ollama?"
+    },
+    {
+      "type": "action",
+      "text": "Visit https://posit-dev.github.io/chatlas/reference/ChatOllama.html"
+    },
+    {
+      "type": "info",
+      "text": "Want to learn more about streaming content?"
+ }, + { + "type": "action", + "text": "Visit https://shiny.posit.co/py/docs/genai-stream.html" + } + ] +} diff --git a/shiny/templates/markdown-stream/llms/ollama/app.py b/shiny/templates/markdown-stream/llms/ollama/app.py new file mode 100644 index 000000000..ef4bd2fc0 --- /dev/null +++ b/shiny/templates/markdown-stream/llms/ollama/app.py @@ -0,0 +1,36 @@ +# ------------------------------------------------------------------------------------ +# A basic Shiny MarkdownStream example powered by Ollama +# ------------------------------------------------------------------------------------ +from chatlas import ChatOllama + +from shiny import reactive +from shiny.express import input, ui + +# ChatOllama() requires an Ollama model server to be running locally. +# See the docs for more information on how to set up a local Ollama server. +# https://posit-dev.github.io/chatlas/reference/ChatOllama.html +chat_client = ChatOllama() + +# Some sidebar input controls to populate a prompt and trigger the stream +with ui.sidebar(): + ui.input_select( + "comic", + "Choose a comedian", + choices=["Jerry Seinfeld", "Ali Wong", "Mitch Hedberg"], + ) + ui.input_action_button("go", "Tell me a joke", class_="btn-primary") + +# Create and display a MarkdownStream() +stream = ui.MarkdownStream(id="my_stream") +stream.ui( + content="Press the button and I'll tell you a joke.", +) + + +# Clicking the button triggers the streaming joke generation +@reactive.effect +@reactive.event(input.go) +async def do_joke(): + prompt = f"Pretend you are {input.comic()} and tell me a funny joke." 
+ response = await chat_client.stream_async(prompt) + await stream.stream(response) diff --git a/shiny/templates/markdown-stream/llms/ollama/requirements.txt b/shiny/templates/markdown-stream/llms/ollama/requirements.txt new file mode 100644 index 000000000..9078426f7 --- /dev/null +++ b/shiny/templates/markdown-stream/llms/ollama/requirements.txt @@ -0,0 +1,2 @@ +shiny +chatlas[ollama] diff --git a/shiny/templates/markdown-stream/llms/openai/_template.json b/shiny/templates/markdown-stream/llms/openai/_template.json new file mode 100644 index 000000000..7cf31601d --- /dev/null +++ b/shiny/templates/markdown-stream/llms/openai/_template.json @@ -0,0 +1,27 @@ +{ + "type": "app", + "id": "stream-ai-openai", + "title": "Stream from OpenAI", + "next_steps": [ + "Put your OpenAI API key in the `template.env` file and rename it to `.env`.", + "Run the app with `shiny run app.py`." + ], + "follow_up": [ + { + "type": "info", + "text": "Need help obtaining an API key?" + }, + { + "type": "action", + "text": "Learn how to obtain one at https://posit-dev.github.io/chatlas/reference/ChatOpenAI.html" + }, + { + "type": "info", + "text": "Want to learn more about streaming content?" + }, + { + "type": "action", + "text": "Learn more at https://shiny.posit.co/py/docs/genai-stream.html" + } + ] +} diff --git a/shiny/templates/markdown-stream/llms/openai/app.py b/shiny/templates/markdown-stream/llms/openai/app.py new file mode 100644 index 000000000..31f75ac6d --- /dev/null +++ b/shiny/templates/markdown-stream/llms/openai/app.py @@ -0,0 +1,38 @@ +# ------------------------------------------------------------------------------------ +# A basic Shiny MarkdownStream example powered by OpenAI. +# ------------------------------------------------------------------------------------ +from app_utils import load_dotenv +from chatlas import ChatOpenAI + +from shiny import reactive +from shiny.express import input, ui + +# ChatOpenAI() requires an API key from OpenAI. 
+# See the docs for more information on how to obtain one. +# https://posit-dev.github.io/chatlas/reference/ChatOpenAI.html +load_dotenv() +chat_client = ChatOpenAI() + +# Some sidebar input controls to populate a prompt and trigger the stream +with ui.sidebar(): + ui.input_select( + "comic", + "Choose a comedian", + choices=["Jerry Seinfeld", "Ali Wong", "Mitch Hedberg"], + ) + ui.input_action_button("go", "Tell me a joke", class_="btn-primary") + +# Create and display a MarkdownStream() +stream = ui.MarkdownStream(id="my_stream") +stream.ui( + content="Press the button and I'll tell you a joke.", +) + + +# Clicking the button triggers the streaming joke generation +@reactive.effect +@reactive.event(input.go) +async def do_joke(): + prompt = f"Pretend you are {input.comic()} and tell me a funny joke." + response = await chat_client.stream_async(prompt) + await stream.stream(response) diff --git a/shiny/templates/markdown-stream/llms/openai/app_utils.py b/shiny/templates/markdown-stream/llms/openai/app_utils.py new file mode 100644 index 000000000..404a13730 --- /dev/null +++ b/shiny/templates/markdown-stream/llms/openai/app_utils.py @@ -0,0 +1,26 @@ +import os +from pathlib import Path +from typing import Any + +app_dir = Path(__file__).parent +env_file = app_dir / ".env" + + +def load_dotenv(dotenv_path: os.PathLike[str] = env_file, **kwargs: Any) -> None: + """ + A convenience wrapper around `dotenv.load_dotenv` that warns if `dotenv` is not installed. + It also returns `None` to make it easier to ignore the return value. + """ + try: + import dotenv + + dotenv.load_dotenv(dotenv_path=dotenv_path, **kwargs) + except ImportError: + import warnings + + warnings.warn( + "Could not import `dotenv`. 
If you want to use `.env` files to "
+            "load environment variables, please install it using "
+            "`pip install python-dotenv`.",
+            stacklevel=2,
+        )
diff --git a/shiny/templates/markdown-stream/llms/openai/requirements.txt b/shiny/templates/markdown-stream/llms/openai/requirements.txt
new file mode 100644
index 000000000..e7f23a5c4
--- /dev/null
+++ b/shiny/templates/markdown-stream/llms/openai/requirements.txt
@@ -0,0 +1,3 @@
+shiny
+python-dotenv
+chatlas[openai]
diff --git a/shiny/templates/markdown-stream/llms/openai/template.env b/shiny/templates/markdown-stream/llms/openai/template.env
new file mode 100644
index 000000000..33b6abc3f
--- /dev/null
+++ b/shiny/templates/markdown-stream/llms/openai/template.env
@@ -0,0 +1,3 @@
+# Once you've provided your API key, rename this file to .env
+# The load_dotenv() in the app.py will then load this env variable
+OPENAI_API_KEY=
diff --git a/shiny/ui/_chat.py b/shiny/ui/_chat.py
index 52c23fba2..5dd8661bc 100644
--- a/shiny/ui/_chat.py
+++ b/shiny/ui/_chat.py
@@ -90,7 +90,7 @@
 ]
 
 
-@add_example(ex_dir="../templates/chat/starters/hello")
+@add_example("app-core.py")
 class Chat:
     """
     Create a chat interface.
@@ -1326,7 +1326,7 @@ async def _send_custom_message(self, handler: str, obj: ClientMessage | None):
         )
 
 
-@add_example(ex_dir="../templates/chat/starters/hello")
+@add_example("app-express.py")
 class ChatExpress(Chat):
     def ui(
         self,
@@ -1377,7 +1377,7 @@ def ui(
         )
 
 
-@add_example(ex_dir="../templates/chat/starters/hello")
+@add_example(ex_dir="../api-examples/Chat")
 def chat_ui(
     id: str,
     *,