|
| 1 | +""" |
| 2 | +Ragbits Chat Example: Tutorial Chat Interface |
| 3 | +
|
This example demonstrates how to use the `ChatInterface` to create a chat application.
|
It showcases different functionalities including:
- authentication
- user settings
- feedback
| 13 | +
|
| 14 | +It showcases different chat response types including: |
| 15 | +- text responses |
| 16 | +- live updates |
| 17 | +- reference web search results |
| 18 | +- image generation |
| 19 | +
|
| 20 | +To run the script, execute the following command: |
| 21 | +
|
| 22 | + ```bash |
| 23 | + ragbits api run examples.chat.tutorial:MyChat --auth examples.chat.tutorial:get_auth_backend --debug |
| 24 | + ``` |
| 25 | +""" |
| 26 | + |
| 27 | +# /// script |
| 28 | +# requires-python = ">=3.10" |
| 29 | +# dependencies = [ |
| 30 | +# "ragbits-chat", |
| 31 | +# ] |
| 32 | +# /// |
| 33 | +# |
| 34 | + |
| 35 | +import base64 |
| 36 | +from collections.abc import AsyncGenerator |
| 37 | +from pathlib import Path |
| 38 | +from typing import Literal |
| 39 | + |
| 40 | +from pydantic import BaseModel, ConfigDict, Field |
| 41 | + |
| 42 | +from ragbits.agents import Agent, ToolCallResult |
| 43 | +from ragbits.agents.tools.openai import get_image_generation_tool, get_web_search_tool |
| 44 | +from ragbits.chat.auth import ListAuthenticationBackend |
| 45 | +from ragbits.chat.interface import ChatInterface |
| 46 | +from ragbits.chat.interface.forms import FeedbackConfig, UserSettings |
| 47 | +from ragbits.chat.interface.types import ChatContext, ChatResponse, LiveUpdateType, Message |
| 48 | +from ragbits.chat.interface.ui_customization import HeaderCustomization, PageMetaCustomization, UICustomization |
| 49 | +from ragbits.core.llms import LiteLLM, ToolCall |
| 50 | +from ragbits.core.prompt import Prompt |
| 51 | + |
| 52 | + |
class LikeFormExample(BaseModel):
    """Example "like" feedback form, defined as a Pydantic model so the UI can render it from its JSON schema."""

    model_config = ConfigDict(title="Like Form", json_schema_serialization_defaults_required=True)

    # Free-text field; min_length=1 makes it effectively required and non-empty.
    like_reason: str = Field(description="Why do you like this?", min_length=1)
| 65 | + |
| 66 | + |
class DislikeFormExample(BaseModel):
    """Example "dislike" feedback form, defined as a Pydantic model so the UI can render it from its JSON schema."""

    model_config = ConfigDict(
        title="Dislike Form",
        json_schema_serialization_defaults_required=True,
    )

    # Closed set of categories; Literal renders as a choice control in the UI.
    issue_type: Literal["Incorrect information", "Not helpful", "Unclear", "Other"] = Field(
        description="What was the issue?",
    )
    # Required free-text explanation (min_length=1 forbids an empty submission).
    feedback: str = Field(description="Please provide more details", min_length=1)
| 76 | + |
| 77 | + |
class UserSettingsFormExample(BaseModel):
    """Example user-settings form, defined as a Pydantic model so the UI can render it from its JSON schema."""

    model_config = ConfigDict(
        title="Chat Form",
        json_schema_serialization_defaults_required=True,
    )

    # Response language; falls back to English when the user has not picked one.
    language: Literal["English", "Polish"] = Field(
        default="English",
        description="Please select the language",
    )
| 84 | + |
| 85 | + |
class GeneralAssistantPromptInput(BaseModel):
    """
    Input format for the General Assistant Prompt.
    """

    # The user's question; injected into the user prompt template as {{ query }}.
    query: str
    # Target response language; injected into the system prompt as {{ language }}.
    language: str
| 93 | + |
| 94 | + |
class GeneralAssistantPrompt(Prompt[GeneralAssistantPromptInput]):
    """
    Prompt that responds to user queries, delegating to the web-search and
    image-generation tools when appropriate.
    """

    # Guideline 6 previously misspelled "explicitly" as "explicity"; fixed so the
    # instruction reads cleanly to the model.
    system_prompt = """
    You are a helpful assistant that is expert in mountain hiking and answers user questions.
    You have access to the following tools: web search and image generation.

    Guidelines:
    1. Use the web search tool when the user asks for factual information, research, or current events.
    2. Use the image generation tool when the user asks to create, generate, draw, or produce images.
    3. The image generation tool generates images in 512x512 resolution.
    4. Return the image as a base64 encoded string in the response.
    5. Always select the most appropriate tool based on the user’s request.
    6. If the user asks explicitly for a picture, use only the image generation tool.
    7. Do not output images in chat. The image will be displayed in the UI.
    8. Answer in {{ language }} language.
    """

    user_prompt = """
    {{ query }}
    """
| 118 | + |
| 119 | + |
| 120 | +class MyChat(ChatInterface): |
| 121 | + """A simple example implementation of the ChatInterface that demonstrates different response types.""" |
| 122 | + |
| 123 | + ui_customization = UICustomization( |
| 124 | + header=HeaderCustomization(title="Authenticated Tutorial Ragbits Chat", subtitle="by deepsense.ai", logo="🐰"), |
| 125 | + welcome_message=( |
| 126 | + "🔐 **Welcome to Authenticated Tutorial Ragbits Chat!**\n\n" |
| 127 | + "You can ask me **anything** about mountain hiking! \n\n Also I can generate images for you.\n\n" |
| 128 | + "Please log in to start chatting!" |
| 129 | + ), |
| 130 | + meta=PageMetaCustomization(favicon="🔨", page_title="Change me!"), |
| 131 | + ) |
| 132 | + |
| 133 | + feedback_config = FeedbackConfig( |
| 134 | + like_enabled=True, |
| 135 | + like_form=LikeFormExample, |
| 136 | + dislike_enabled=True, |
| 137 | + dislike_form=DislikeFormExample, |
| 138 | + ) |
| 139 | + user_settings = UserSettings(form=UserSettingsFormExample) |
| 140 | + |
| 141 | + conversation_history = True |
| 142 | + show_usage = True |
| 143 | + |
| 144 | + def __init__(self) -> None: |
| 145 | + self.model_name = "gpt-4o-2024-08-06" |
| 146 | + self.llm = LiteLLM(model_name=self.model_name, use_structured_output=True) |
| 147 | + self.agent = Agent( |
| 148 | + llm=self.llm, |
| 149 | + prompt=GeneralAssistantPrompt, |
| 150 | + tools=[ |
| 151 | + get_web_search_tool(self.model_name), |
| 152 | + get_image_generation_tool(self.model_name), |
| 153 | + ], |
| 154 | + ) |
| 155 | + |
| 156 | + @staticmethod |
| 157 | + def _get_tool_display_name(tool_name: str) -> str: |
| 158 | + """Get display name for a tool.""" |
| 159 | + return {"search_web": "🔍 Web Search", "image_generation": "🎨 Image Generator"}.get(tool_name, tool_name) |
| 160 | + |
| 161 | + async def _handle_tool_call(self, response: ToolCall) -> ChatResponse: |
| 162 | + """Handle tool call and return live update.""" |
| 163 | + tool_display_name = self._get_tool_display_name(response.name) |
| 164 | + return self.create_live_update( |
| 165 | + response.id, LiveUpdateType.START, f"Using {tool_display_name}", "Processing your request..." |
| 166 | + ) |
| 167 | + |
| 168 | + async def _handle_tool_result(self, response: ToolCallResult) -> AsyncGenerator[ChatResponse, None]: |
| 169 | + """Handle tool call result and yield appropriate responses.""" |
| 170 | + tool_display_name = self._get_tool_display_name(response.name) |
| 171 | + |
| 172 | + yield self.create_live_update( |
| 173 | + response.id, |
| 174 | + LiveUpdateType.FINISH, |
| 175 | + f"{tool_display_name} completed", |
| 176 | + ) |
| 177 | + |
| 178 | + if response.name == "search_web": |
| 179 | + async for reference in self._extract_web_references(response): |
| 180 | + yield reference |
| 181 | + elif response.name == "image_generation" and response.result.image_path: |
| 182 | + yield await self._create_image_response(response.result.image_path) |
| 183 | + |
| 184 | + async def _extract_web_references(self, response: ToolCallResult) -> AsyncGenerator[ChatResponse, None]: |
| 185 | + """Extract URL citations from web search results.""" |
| 186 | + for item in response.result.output: |
| 187 | + if item.type == "message": |
| 188 | + for content in item.content: |
| 189 | + for annotation in content.annotations: |
| 190 | + if annotation.type == "url_citation" and annotation.title and annotation.url: |
| 191 | + yield self.create_reference(title=annotation.title, url=annotation.url, content="") |
| 192 | + |
| 193 | + async def _create_image_response(self, image_path: Path) -> ChatResponse: |
| 194 | + """Create image response from file path.""" |
| 195 | + with open(image_path, "rb") as image_file: |
| 196 | + image_filename = image_path.name |
| 197 | + base64_image = base64.b64encode(image_file.read()).decode("utf-8") |
| 198 | + return self.create_image_response(image_filename, f"data:image/png;base64,{base64_image}") |
| 199 | + |
| 200 | + async def chat( |
| 201 | + self, |
| 202 | + message: str, |
| 203 | + history: list[Message] | None = None, |
| 204 | + context: ChatContext | None = None, |
| 205 | + ) -> AsyncGenerator[ChatResponse, None]: |
| 206 | + """ |
| 207 | + Example implementation of the ChatInterface. |
| 208 | +
|
| 209 | + Args: |
| 210 | + message: The current user message |
| 211 | + history: Optional list of previous messages in the conversation |
| 212 | + context: Optional context |
| 213 | +
|
| 214 | + Yields: |
| 215 | + ChatResponse objects containing different types of content: |
| 216 | + - Text chunks for the actual response |
| 217 | + - Image responses with base64 data URLs |
| 218 | + - Live updates for tool execution status |
| 219 | + """ |
| 220 | + # Get authenticated user info |
| 221 | + user_info = context.state.get("authenticated_user") if context else None |
| 222 | + |
| 223 | + if not user_info: |
| 224 | + yield self.create_text_response("⚠️ Authentication information not found.") |
| 225 | + return |
| 226 | + |
| 227 | + stream = self.agent.run_streaming( |
| 228 | + GeneralAssistantPromptInput(query=message, language=context.user_settings["language"]) |
| 229 | + ) |
| 230 | + |
| 231 | + async for response in stream: |
| 232 | + match response: |
| 233 | + case str(): |
| 234 | + # Regular text content from the LLM |
| 235 | + if response.strip(): # Only yield non-empty text |
| 236 | + yield self.create_text_response(response) |
| 237 | + |
| 238 | + case ToolCall(): |
| 239 | + yield await self._handle_tool_call(response) |
| 240 | + |
| 241 | + case ToolCallResult(): |
| 242 | + async for result_response in self._handle_tool_result(response): |
| 243 | + yield result_response |
| 244 | + |
| 245 | + |
def get_auth_backend() -> ListAuthenticationBackend:
    """Factory function to create the preferred authentication backend."""
    # Single hard-coded demo account; a real deployment would load users from a store.
    demo_user = {
        "user_id": "8e6c5871-3817-4d62-828f-ef6789de31b9",
        "username": "test",
        "password": "test123",
        "full_name": "Test User",
        "roles": ["user"],
        "metadata": {"department": "Test", "clearance_level": "low"},
    }
    return ListAuthenticationBackend([demo_user])
0 commit comments