
Commit bdbf1d1

Add assistant urls and views
1 parent ae28268 commit bdbf1d1

2 files changed, +222 -0 lines changed

server/api/views/assistant/urls.py

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
from django.urls import path

from .views import Assistant

urlpatterns = [path("v1/api/assistant", Assistant.as_view(), name="assistant")]
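
For reference, a rough sketch of how a client might exercise the new route once this URLconf is included by the project. The mount point, host, and token-auth header below are assumptions rather than part of this commit; the Assistant view added in views.py expects an authenticated POST with a JSON body containing "message" and replies with {"response": ...}.

import requests

resp = requests.post(
    "http://localhost:8000/v1/api/assistant",  # assumes this URLconf is included at the project root
    json={"message": "What do my uploaded documents say about invoices?"},
    headers={"Authorization": "Token <api-token>"},  # assumed DRF token auth; use whatever auth the project configures
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["response"])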
server/api/views/assistant/views.py

Lines changed: 217 additions & 0 deletions
@@ -0,0 +1,217 @@
import os
import json
import logging
from typing import Callable

from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt

from openai import OpenAI

from ...services.embedding_services import get_closest_embeddings
from ...services.conversions_services import convert_uuids

# Configure logging
logger = logging.getLogger(__name__)

# OpenAI Cookbook: Handling Function Calls with Reasoning Models
# https://cookbook.openai.com/examples/reasoning_function_calls
def invoke_functions_from_response(
    response, tool_mapping: dict[str, Callable]
) -> list[dict]:
    """Extract all function calls from the response, look up the corresponding tool function(s) and execute them.
    (This would be a good place to handle asynchronous tool calls, or ones that take a while to execute.)
    This returns a list of messages to be added to the conversation history.

    Parameters
    ----------
    response : OpenAI Response
        The response object from OpenAI containing output items that may include function calls
    tool_mapping : dict[str, Callable]
        A dictionary mapping function names (as strings) to their corresponding Python functions.
        Keys should match the function names defined in the tools schema.

    Returns
    -------
    list[dict]
        List of function call output messages formatted for the OpenAI conversation.
        Each message contains:
        - type: "function_call_output"
        - call_id: The unique identifier for the function call
        - output: The result returned by the executed function (string or error message)
    """
    intermediate_messages = []
    for response_item in response.output:
        if response_item.type == "function_call":
            target_tool = tool_mapping.get(response_item.name)
            if target_tool:
                try:
                    arguments = json.loads(response_item.arguments)
                    logger.info(
                        f"Invoking tool: {response_item.name} with arguments: {arguments}"
                    )
                    tool_output = target_tool(**arguments)
                    logger.debug(f"Tool {response_item.name} completed successfully")
                except Exception as e:
                    msg = f"Error executing function call: {response_item.name}: {e}"
                    tool_output = msg
                    logger.error(msg, exc_info=True)
            else:
                msg = f"ERROR - No tool registered for function call: {response_item.name}"
                tool_output = msg
                logger.error(msg)
            intermediate_messages.append(
                {
                    "type": "function_call_output",
                    "call_id": response_item.call_id,
                    "output": tool_output,
                }
            )
        elif response_item.type == "reasoning":
            logger.debug("Reasoning step")
    return intermediate_messages

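# Example shape of one entry produced above (values illustrative):
#     {"type": "function_call_output", "call_id": "call_abc123", "output": "<tool result string>"}
# The list returned here is handed straight back to client.responses.create() as the
# next turn's input inside the Assistant view's tool loop below.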

@method_decorator(csrf_exempt, name="dispatch")
class Assistant(APIView):
    permission_classes = [IsAuthenticated]

    def post(self, request):
        try:
            user = request.user

            client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

            TOOL_DESCRIPTION = """
            Search through the user's uploaded documents using semantic similarity matching.
            This function finds the most relevant document chunks based on the input query and
            returns contextual information including page numbers, chunk locations, and similarity scores.
            Use this to answer the user's questions.
            """

            TOOL_PROPERTY_DESCRIPTION = """
            The search query to find semantically similar content in uploaded documents.
            Should be a natural language question or keyword phrase.
            """

            tools = [
                {
                    "type": "function",
                    "name": "search_documents",
                    "description": TOOL_DESCRIPTION,
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "query": {
                                "type": "string",
                                "description": TOOL_PROPERTY_DESCRIPTION,
                            }
                        },
                        "required": ["query"],
                    },
                }
            ]

            def search_documents(query: str, user=user) -> str:
                """
                Search through user's uploaded documents using semantic similarity.

                This function performs vector similarity search against the user's document corpus
                and returns formatted results with context information for the LLM to use.

                Parameters
                ----------
                query : str
                    The search query string
                user : User
                    The authenticated user whose documents to search

                Returns
                -------
                str
                    Formatted search results containing document excerpts with metadata

                Raises
                ------
                Exception
                    If embedding search fails
                """

                try:
                    embeddings_results = get_closest_embeddings(
                        user=user, message_data=query.strip()
                    )
                    embeddings_results = convert_uuids(embeddings_results)

                    if not embeddings_results:
                        return "No relevant documents found for your query. Please try different search terms or upload documents first."

                    # Format results with clear structure and metadata
                    prompt_texts = [
                        f"[Document {i + 1} - File: {obj['file_id']}, Page: {obj['page_number']}, Chunk: {obj['chunk_number']}, Similarity: {1 - obj['distance']:.3f}]\n{obj['text']}\n[End Document {i + 1}]"
                        for i, obj in enumerate(embeddings_results)
                    ]

                    return "\n\n".join(prompt_texts)

                except Exception as e:
                    return f"Error searching documents: {str(e)}. Please try again if the issue persists."

            MODEL_DEFAULTS = {
                "model": "gpt-5-nano",  # 400,000 token context window
                "reasoning": {"effort": "medium"},
                "tools": tools,
            }

            # We fetch a response and then kick off a loop to handle the response

            request_data = request.data.get("message", None)
            if not request_data:
                return Response(
                    {"error": "Message data is required."},
                    status=status.HTTP_400_BAD_REQUEST,
                )
            message = str(request_data)

            response = client.responses.create(
                input=[{"role": "user", "content": message}], **MODEL_DEFAULTS
            )

            # OpenAI Cookbook: Handling Function Calls with Reasoning Models
            # https://cookbook.openai.com/examples/reasoning_function_calls
            while True:
                # Mapping of the tool names we tell the model about and the functions that implement them
                function_responses = invoke_functions_from_response(
                    response, tool_mapping={"search_documents": search_documents}
                )
                if len(function_responses) == 0:  # We're done reasoning
                    logger.info(f"Reasoning completed for user {user.id}")
                    final_response = response.output_text
                    logger.debug(
                        f"Final response length: {len(final_response)} characters"
                    )
                    break
                else:
                    logger.debug("More reasoning required, continuing...")
                    response = client.responses.create(
                        input=function_responses,
                        previous_response_id=response.id,
                        **MODEL_DEFAULTS,
                    )

            return Response({"response": final_response}, status=status.HTTP_200_OK)

        except Exception as e:
            logger.error(
                f"Unexpected error in Assistant view for user {request.user.id if hasattr(request, 'user') else 'unknown'}: {e}",
                exc_info=True,
            )
            return Response(
                {"error": "An unexpected error occurred. Please try again later."},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )
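
As a sanity check on the tool-call plumbing above, here is a minimal offline sketch of invoke_functions_from_response using stand-in objects instead of a real OpenAI response. The module path is an assumption based on the file location, and it needs a configured Django environment to import (e.g. run inside python manage.py shell), since views.py pulls in DRF at import time.

import json
from types import SimpleNamespace

from api.views.assistant.views import invoke_functions_from_response  # assumed module path

def fake_search(query: str) -> str:
    return f"stub results for: {query}"

# Mimic a Response whose output contains a single function_call item
fake_response = SimpleNamespace(
    output=[
        SimpleNamespace(
            type="function_call",
            name="search_documents",
            call_id="call_123",  # illustrative id
            arguments=json.dumps({"query": "invoices"}),
        )
    ]
)

messages = invoke_functions_from_response(
    fake_response, tool_mapping={"search_documents": fake_search}
)
print(messages)
# [{'type': 'function_call_output', 'call_id': 'call_123', 'output': 'stub results for: invoices'}]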

0 commit comments