@@ -12,7 +12,8 @@ First, create a chat implementation by subclassing `ChatInterface`. Here's a min
 from collections.abc import AsyncGenerator
 
 from ragbits.chat.interface import ChatInterface
-from ragbits.chat.interface.types import ChatResponse, Message
+from ragbits.chat.interface.types import ChatContext, ChatResponse
+from ragbits.core.prompt import ChatFormat
 from ragbits.core.llms import LiteLLM
 
 class MyChat(ChatInterface):
@@ -22,8 +23,8 @@ class MyChat(ChatInterface):
     async def chat(
         self,
         message: str,
-        history: list[Message] | None = None,
-        context: dict | None = None,
+        history: ChatFormat,
+        context: ChatContext,
     ) -> AsyncGenerator[ChatResponse, None]:
         async for chunk in self.llm.generate_streaming([*history, {"role": "user", "content": message}]):
             yield self.create_text_response(chunk)
@@ -58,7 +59,7 @@ Ragbits Chat supports multiple response types that can be yielded from your `cha
 Text responses are the primary way to stream content to users. Use `create_text_response()` to yield text chunks:
 
 ```python
-async def chat(self, message: str, history: list[Message] | None = None, context: dict | None = None) -> AsyncGenerator[ChatResponse, None]:
+async def chat(self, message: str, history: ChatFormat, context: ChatContext) -> AsyncGenerator[ChatResponse, None]:
     # Stream response from LLM
     async for chunk in self.llm.generate_streaming([*history, {"role": "user", "content": message}]):
         yield self.create_text_response(chunk)
@@ -69,7 +70,7 @@ async def chat(self, message: str, history: list[Message] | None = None, context
 References allow you to cite sources, documents, or external links that support your response:
 
 ```python
-async def chat(self, message: str, history: list[Message] | None = None, context: dict | None = None) -> AsyncGenerator[ChatResponse, None]:
+async def chat(self, message: str, history: ChatFormat, context: ChatContext) -> AsyncGenerator[ChatResponse, None]:
     # Add a reference
     yield self.create_reference(
         title="Example Reference",
@@ -85,7 +86,7 @@ You can include images in your responses using `create_image_response()`:
 ```python
 import uuid
 
-async def chat(self, message: str, history: list[Message] | None = None, context: dict | None = None) -> AsyncGenerator[ChatResponse, None]:
+async def chat(self, message: str, history: ChatFormat, context: ChatContext) -> AsyncGenerator[ChatResponse, None]:
     # Add an image to the response
     yield self.create_image_response(
         str(uuid.uuid4()),  # Unique identifier for the image
@@ -98,7 +99,7 @@ async def chat(self, message: str, history: list[Message] | None = None, context
 Provide suggested follow-up questions to guide the conversation:
 
 ```python
-async def chat(self, message: str, history: list[Message] | None = None, context: dict | None = None) -> AsyncGenerator[ChatResponse, None]:
+async def chat(self, message: str, history: ChatFormat, context: ChatContext) -> AsyncGenerator[ChatResponse, None]:
     # Main response...
     async for chunk in self.llm.generate_streaming([*history, {"role": "user", "content": message}]):
         yield self.create_text_response(chunk)
@@ -121,7 +122,7 @@ Live updates show real-time progress for long-running operations (like web searc
 import asyncio
 from ragbits.chat.interface.types import LiveUpdateType
 
-async def chat(self, message: str, history: list[Message] | None = None, context: dict | None = None) -> AsyncGenerator[ChatResponse, None]:
+async def chat(self, message: str, history: ChatFormat, context: ChatContext) -> AsyncGenerator[ChatResponse, None]:
     # Start a live update
     yield self.create_live_update(
         "search_task",  # Unique task ID
@@ -163,12 +164,13 @@ Use `create_state_update()` to store state information that persists across conv
 
 ```python
 from ragbits.chat.interface.types import ChatContext
+from ragbits.core.prompt import ChatFormat
 
 async def chat(
     self,
     message: str,
-    history: list[Message] | None = None,
-    context: ChatContext | None = None
+    history: ChatFormat,
+    context: ChatContext
 ) -> AsyncGenerator[ChatResponse, None]:
     # Access existing state from context
     current_state = context.state if context else {}
@@ -393,7 +395,8 @@ from pydantic import BaseModel, ConfigDict, Field
 
 from ragbits.chat.interface import ChatInterface
 from ragbits.chat.interface.forms import FeedbackConfig, UserSettings
-from ragbits.chat.interface.types import ChatContext, ChatResponse, LiveUpdateType, Message
+from ragbits.chat.interface.types import ChatContext, ChatResponse, LiveUpdateType
+from ragbits.core.prompt import ChatFormat
 from ragbits.chat.interface.ui_customization import HeaderCustomization, PageMetaCustomization, UICustomization
 from ragbits.core.llms import LiteLLM
 
@@ -486,8 +489,8 @@ class MyChat(ChatInterface):
     async def chat(
         self,
         message: str,
-        history: list[Message] | None = None,
-        context: ChatContext | None = None,
+        history: ChatFormat,
+        context: ChatContext,
     ) -> AsyncGenerator[ChatResponse, None]:
         """
         Comprehensive chat implementation demonstrating all response types.
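For reference, a minimal sketch of driving the updated `chat()` signature (not part of the change above). It assumes `ChatFormat` is the plain list of role/content message dicts used in the examples, that `ChatContext()` can be constructed with its defaults, and that `MyChat` is the class defined in the first hunk:

```python
import asyncio

from ragbits.chat.interface.types import ChatContext
from ragbits.core.prompt import ChatFormat


async def main() -> None:
    chat = MyChat()  # the ChatInterface subclass from the example above

    # history is now a ChatFormat: a plain list of role/content message dicts
    history: ChatFormat = [
        {"role": "user", "content": "Hi!"},
        {"role": "assistant", "content": "Hello! How can I help?"},
    ]

    # context is now a required ChatContext instead of an optional dict
    async for response in chat.chat("Tell me more", history=history, context=ChatContext()):
        print(response)


asyncio.run(main())
```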