1
+ import json
2
+ import requests
3
+ import gradio as gr
4
+
5
# Default JSON Schema shown in the "Function parameters" textbox: a single
# required string property, matching the classic get_weather example.
# It is parsed with json.loads() before being sent to the server.
DEFAULT_FUNCTION_PROPERTIES = """
{
    "type": "object",
    "properties": {
        "location": {
            "type": "string",
            "description": "The city and state, e.g. San Francisco, CA"
        }
    },
    "required": ["location"]
}
""".strip()
17
+
18
+ def chat_with_model (message , history , model_choice , instructions , effort , use_functions ,
19
+ function_name , function_description , function_parameters ,
20
+ use_browser_search , temperature , max_output_tokens , debug_mode ):
21
+
22
+ if not message .strip ():
23
+ return history , ""
24
+
25
+ # Append user message and empty assistant placeholder (idiomatic Gradio pattern)
26
+ history = history + [[message , "" ]]
27
+
28
+ # Build messages list from history (excluding the empty assistant placeholder)
29
+ messages = []
30
+
31
+ # Convert history to messages format (excluding the last empty assistant message)
32
+ for user_msg , assistant_msg in history [:- 1 ]:
33
+ if user_msg :
34
+ messages .append ({
35
+ "type" : "message" ,
36
+ "role" : "user" ,
37
+ "content" : [{"type" : "input_text" , "text" : user_msg }]
38
+ })
39
+ if assistant_msg :
40
+ messages .append ({
41
+ "type" : "message" ,
42
+ "role" : "assistant" ,
43
+ "content" : [{"type" : "output_text" , "text" : assistant_msg }]
44
+ })
45
+
46
+ # Add current user message
47
+ messages .append ({
48
+ "type" : "message" ,
49
+ "role" : "user" ,
50
+ "content" : [{"type" : "input_text" , "text" : message }]
51
+ })
52
+
53
+ # Prepare tools
54
+ tools = []
55
+ if use_functions :
56
+ try :
57
+ tools .append ({
58
+ "type" : "function" ,
59
+ "name" : function_name ,
60
+ "description" : function_description ,
61
+ "parameters" : json .loads (function_parameters ),
62
+ })
63
+ except json .JSONDecodeError :
64
+ pass
65
+
66
+ if use_browser_search :
67
+ tools .append ({"type" : "browser_search" })
68
+
69
+ # Get URL based on model (matching streamlit logic)
70
+ options = ["large" , "small" ]
71
+ URL = ("http://localhost:8081/v1/responses" if model_choice == options [1 ]
72
+ else "http://localhost:8000/v1/responses" )
73
+
74
+ try :
75
+ response = requests .post (
76
+ URL ,
77
+ json = {
78
+ "input" : messages ,
79
+ "stream" : True ,
80
+ "instructions" : instructions ,
81
+ "reasoning" : {"effort" : effort },
82
+ "metadata" : {"__debug" : debug_mode },
83
+ "tools" : tools ,
84
+ "temperature" : temperature ,
85
+ "max_output_tokens" : max_output_tokens ,
86
+ },
87
+ stream = True ,
88
+ )
89
+
90
+ full_content = ""
91
+ text_delta = ""
92
+ current_output_index = 0
93
+ in_reasoning = False
94
+
95
+ for line in response .iter_lines (decode_unicode = True ):
96
+ if not line or not line .startswith ("data:" ):
97
+ continue
98
+ data_str = line [len ("data:" ):].strip ()
99
+ if not data_str :
100
+ continue
101
+
102
+ try :
103
+ data = json .loads (data_str )
104
+ except Exception :
105
+ continue
106
+
107
+ event_type = data .get ("type" , "" )
108
+ output_index = data .get ("output_index" , 0 )
109
+
110
+ if event_type == "response.output_item.added" :
111
+ current_output_index = output_index
112
+ output_type = data .get ("item" , {}).get ("type" , "message" )
113
+ text_delta = ""
114
+
115
+ if output_type == "reasoning" :
116
+ if not in_reasoning :
117
+ full_content += "🤔 **Thinking...**\n "
118
+ in_reasoning = True
119
+ elif output_type == "message" :
120
+ if in_reasoning :
121
+ full_content += "\n \n "
122
+ in_reasoning = False
123
+
124
+ elif event_type == "response.reasoning_text.delta" :
125
+ delta = data .get ("delta" , "" )
126
+ full_content += delta
127
+
128
+ # Update last assistant message (idiomatic Gradio pattern)
129
+ history [- 1 ][1 ] = full_content
130
+ yield history , ""
131
+
132
+ elif event_type == "response.output_text.delta" :
133
+ delta = data .get ("delta" , "" )
134
+ full_content += delta
135
+
136
+ # Update last assistant message (idiomatic Gradio pattern)
137
+ history [- 1 ][1 ] = full_content
138
+ yield history , ""
139
+
140
+ elif event_type == "response.output_item.done" :
141
+ item = data .get ("item" , {})
142
+ if item .get ("type" ) == "function_call" :
143
+ function_call_text = f"\n \n 🔨 Called `{ item .get ('name' )} `\n **Arguments**\n ```json\n { item .get ('arguments' , '' )} \n ```"
144
+ full_content += function_call_text
145
+
146
+ # Update last assistant message (idiomatic Gradio pattern)
147
+ history [- 1 ][1 ] = full_content
148
+ yield history , ""
149
+
150
+ elif item .get ("type" ) == "web_search_call" :
151
+ web_search_text = f"\n \n 🌐 **Web Search**\n ```json\n { json .dumps (item .get ('action' , {}), indent = 2 )} \n ```\n ✅ Done"
152
+ full_content += web_search_text
153
+
154
+ # Update last assistant message (idiomatic Gradio pattern)
155
+ history [- 1 ][1 ] = full_content
156
+ yield history , ""
157
+
158
+ elif event_type == "response.completed" :
159
+ response_data = data .get ("response" , {})
160
+ if debug_mode :
161
+ debug_info = response_data .get ("metadata" , {}).get ("__debug" , "" )
162
+ if debug_info :
163
+ full_content += f"\n \n **Debug**\n ```\n { debug_info } \n ```"
164
+
165
+ # Update last assistant message (idiomatic Gradio pattern)
166
+ history [- 1 ][1 ] = full_content
167
+ yield history , ""
168
+ break
169
+
170
+ # Return final history and empty string to clear textbox
171
+ return history , ""
172
+
173
+ except Exception as e :
174
+ error_message = f"❌ Error: { str (e )} "
175
+ history [- 1 ][1 ] = error_message
176
+ return history , ""
177
+
178
+
179
# ---------------------------------------------------------------------------
# Gradio UI: chat panel on the left, model/tool configuration on the right.
# ---------------------------------------------------------------------------
with gr.Blocks(title="💬 Chatbot") as demo:
    gr.Markdown("# 💬 Chatbot")

    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(height=500)

            with gr.Row():
                msg = gr.Textbox(placeholder="Type a message...", scale=4, show_label=False)
                send_btn = gr.Button("Send", scale=1)

            clear_btn = gr.Button("Clear Chat")

        with gr.Column(scale=1):
            model_choice = gr.Radio(["large", "small"], value="small", label="Model")

            instructions = gr.Textbox(
                label="Instructions",
                value="You are a helpful assistant that can answer questions and help with tasks.",
                lines=3,
            )

            effort = gr.Radio(["low", "medium", "high"], value="medium", label="Reasoning effort")

            gr.Markdown("#### Functions")
            use_functions = gr.Checkbox(label="Use functions", value=False)

            # Hidden until "Use functions" is checked (see wiring below).
            with gr.Column(visible=False) as function_group:
                function_name = gr.Textbox(label="Function name", value="get_weather")
                function_description = gr.Textbox(
                    label="Function description",
                    value="Get the weather for a given city",
                )
                function_parameters = gr.Textbox(
                    label="Function parameters",
                    value=DEFAULT_FUNCTION_PROPERTIES,
                    lines=6,
                )

            # The Streamlit original gated browser search behind a
            # "show_browser" query param; here it is always shown.
            gr.Markdown("#### Built-in Tools")
            use_browser_search = gr.Checkbox(label="Use browser search", value=False)

            temperature = gr.Slider(0.0, 1.0, value=1.0, step=0.01, label="Temperature")
            max_output_tokens = gr.Slider(1000, 20000, value=1024, step=100, label="Max output tokens")

            debug_mode = gr.Checkbox(label="Debug mode", value=False)

    # --- Event wiring -----------------------------------------------------
    def _sync_function_panel(enabled):
        """Show the function-definition fields only while enabled."""
        return gr.update(visible=enabled)

    def _reset_chat():
        """Empty the chatbot history."""
        return []

    use_functions.change(_sync_function_panel, use_functions, function_group)

    chat_inputs = [
        msg, chatbot, model_choice, instructions, effort, use_functions,
        function_name, function_description, function_parameters,
        use_browser_search, temperature, max_output_tokens, debug_mode,
    ]

    msg.submit(chat_with_model, chat_inputs, [chatbot, msg])
    send_btn.click(chat_with_model, chat_inputs, [chatbot, msg])
    clear_btn.click(_reset_chat, outputs=chatbot)
245
+
246
# Launch the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()