282 changes: 282 additions & 0 deletions camel/agents/chat_agent.py
@@ -1957,6 +1957,26 @@
if reset_memory:
self.init_messages()

def reset_system_message(
Reviewer comment (Member): we already have update_system_message; it seems unnecessary to add this new function.

self, content: str, reset_memory: bool = True
) -> None:
"""Reset context to new system message.
Args:
content (str): The new system message.
reset_memory (bool):
Whether to reinitialize conversation messages after appending
additional context. Defaults to True.
"""
self._original_system_message = BaseMessage.make_system_message(
content
)
self._system_message = (
self._generate_system_message_for_output_language()
)
if reset_memory:
self.init_messages()

def reset_to_original_system_message(self) -> None:
r"""Reset system message to original, removing any appended context.
@@ -5208,3 +5228,265 @@
mcp_server.tool()(get_available_tools)

return mcp_server

@dependencies_required("fastapi")
def to_openai_compatible_server(self) -> Any:
r"""Create an OpenAI-compatible FastAPI server for this ChatAgent.
Returns:
FastAPI: A FastAPI application that can be served to provide
OpenAI-compatible API endpoints for this ChatAgent.
Example:
```python
agent = ChatAgent(model="gpt-4")
app = agent.to_openai_compatible_server()
# Serve with uvicorn
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
```
"""
import asyncio
import json
import time

from fastapi import FastAPI
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel

# Define Pydantic models for request/response
class ChatMessage(BaseModel):
Reviewer comment (Member) on lines +5258 to +5259: ChatMessage seems defined but not used.

role: str
content: str = ""
name: Optional[str] = None
tool_calls: Optional[List[Dict[str, Any]]] = None

app = FastAPI(
title="CAMEL OpenAI-compatible API",
description="OpenAI-compatible API for CAMEL ChatAgent",
)

@app.post("/v1/chat/completions")
async def chat_completions(request_data: dict):
try:
print("\n" + "=" * 80)
print(f"[{time.strftime('%H:%M:%S')}] 📨 Received Request:")
print(json.dumps(request_data, indent=2, ensure_ascii=False))
print("=" * 80)

messages = request_data.get("messages", [])
model = request_data.get("model", "camel-model")
stream = request_data.get("stream", False)
functions = request_data.get("functions")
tools = request_data.get("tools")
Reviewer comment (Member) on lines +5278 to +5282: The agent's state persists across different API requests, which would cause conversations from different clients to mix and lead to memory leaks. Would it be better to add self.reset() and self._external_tool_schemas.clear() at the start of each request?

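A minimal sketch of the reset the reviewer suggests; self.reset() and self._external_tool_schemas are the names from the comment, and their placement at the top of the handler is illustrative, not part of the diff:

```python
# At the top of chat_completions, before any messages are processed
# (illustrative placement, as the reviewer suggests):
self.reset()                          # drop memory from previous clients
self._external_tool_schemas.clear()   # drop tools added by earlier requests
```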

# Convert OpenAI messages to CAMEL format and record in memory
current_user_message = None
for msg in messages:
msg_role = msg.get("role", "")
msg_content = msg.get("content", "")

if msg_role == "user":
user_msg = BaseMessage.make_user_message(
role_name="User", content=msg_content
)
# Record all but the last user message in memory
# The last user message will be passed to step()
if current_user_message is not None:
self.update_memory(
current_user_message, OpenAIBackendRole.USER
)
current_user_message = user_msg
elif msg_role == "system":
sys_msg = BaseMessage.make_system_message(
role_name="System", content=msg_content
)
self.update_memory(sys_msg, OpenAIBackendRole.SYSTEM)
self.reset_system_message(msg_content, True)
Reviewer comment (Member) on lines +5301 to +5306: Should we update the system message without clearing memory, to preserve the conversation history?

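If the goal is to keep history, a one-line variant using the reset_system_message signature defined earlier in this diff would be:

```python
# Replace the system prompt but preserve the conversation in memory.
self.reset_system_message(msg_content, reset_memory=False)
```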
elif msg_role == "assistant":
# Record previous assistant messages
assistant_msg = BaseMessage.make_assistant_message(
role_name="Assistant", content=msg_content
)
self.update_memory(
assistant_msg, OpenAIBackendRole.ASSISTANT
)
elif msg_role == "tool":
# Handle tool response messages if needed
tool_call_id = msg.get("tool_call_id", "")
tool_msg = FunctionCallingMessage.make_tool_message(
role_name="Tool",
content=msg_content,
tool_call_id=tool_call_id,
)
self.update_memory(tool_msg, OpenAIBackendRole.TOOL)

# Process tools/functions if provided
if tools or functions:
tools_to_use = tools if tools else functions
# Type guard to ensure tools_to_use is not None
if tools_to_use is not None:
for tool in tools_to_use:
self.add_external_tool(tool)
Reviewer comment (Member): It seems the tools added to the agent would accumulate here; should we clean them up after each request ends?

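A hedged sketch of the cleanup the reviewer asks about, clearing the accumulated schemas once the request is finished (assuming _external_tool_schemas is the store that add_external_tool populates):

```python
try:
    if tools_to_use is not None:
        for tool in tools_to_use:
            self.add_external_tool(tool)
    # ... produce and return the response ...
finally:
    # End-of-request cleanup so externally supplied tools do not
    # leak into later requests.
    self._external_tool_schemas.clear()
```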

# Get the response from the agent
if current_user_message is not None:
if stream:
return StreamingResponse(
_stream_response(
current_user_message, request_data
),
media_type="text/event-stream",
)
else:
agent_response = await self.astep(current_user_message)

print(f"agent_response.info {agent_response.info}")
print(f"agent_response.msgs {agent_response.msgs}")

# Convert CAMEL response to OpenAI format
if not agent_response.msgs:
# Empty response or error
content = "No response generated"
finish_reason = "error"
else:
content = agent_response.msgs[0].content
finish_reason = "stop"

# Check for tool calls
tool_calls_response = None
external_tool_requests = agent_response.info.get(
"external_tool_call_requests"
)
if external_tool_requests:
tool_calls_response = []
for tool_call in external_tool_requests:
tool_calls_response.append(
{
"id": (
tool_call.tool_call_id
or f"call_{int(time.time())}"
),
"type": "function",
"function": {
"name": tool_call.tool_name,
"arguments": json.dumps(
tool_call.args
),
},
}
)
finish_reason = "tool_calls"

usage = agent_response.info.get("usage") or {
"prompt_tokens": agent_response.info.get(
"prompt_tokens", 0
),
"completion_tokens": agent_response.info.get(
"completion_tokens", 0
),
"total_tokens": agent_response.info.get(
"total_tokens", 0
),
}

response = {
"id": agent_response.info.get(
"id", f"chatcmpl-{int(time.time())}"
),
"object": "chat.completion",
"created": int(time.time()),
"model": model,
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": (
content
if not tool_calls_response
else None
),
"tool_calls": tool_calls_response,
},
"finish_reason": finish_reason,
}
],
"usage": usage,
}

print(f"[{time.strftime('%H:%M:%S')}] 💬 Response:")
print(
json.dumps(response, indent=2, ensure_ascii=False)
)
print("=" * 80 + "\n")

return response
else:
# No user message provided
return JSONResponse(
status_code=400,
content={"error": "No user message provided"},
)
except Exception as e:
return JSONResponse(
status_code=500,
content={"error": f"Internal server error: {e!s}"},

Check warning (Code scanning / CodeQL, Medium): Information exposure through an exception. Stack trace information flows to this location and may be exposed to an external user.

Copilot Autofix (AI, 7 days ago):

The ideal fix is to avoid sending the exception string to the user and instead provide a generic error message such as "Internal server error." To maintain developer visibility, the exception (including stack trace) should be logged using the existing logging facility (get_logger).

Specifically:

  • In the except Exception as e: block on line 5432, change the response at line 5435 to have a generic message.
  • Add a line to log the exception using the project logger (logger.error), capturing the stack trace with exc_info=True.
  • Ensure logger is defined; from the code, get_logger is imported—so obtain a logger instance in this scope if not present.
  • Do not otherwise alter application logic.

All changes must be in the shown snippet(s), i.e., entirely within camel/agents/chat_agent.py.


Suggested changeset 1: camel/agents/chat_agent.py (Autofix patch)

Run the following command in your local git repository to apply this patch:
cat << 'EOF' | git apply
diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py
--- a/camel/agents/chat_agent.py
+++ b/camel/agents/chat_agent.py
@@ -5430,9 +5430,11 @@
                         content={"error": "No user message provided"},
                     )
             except Exception as e:
+                logger = get_logger()  # Ensure logger in scope
+                logger.error("Exception in chat_completions handler", exc_info=True)
                 return JSONResponse(
                     status_code=500,
-                    content={"error": f"Internal server error: {e!s}"},
+                    content={"error": "Internal server error"},
                 )
 
         async def _stream_response(message: BaseMessage, request_data: dict):
EOF
)

async def _stream_response(message: BaseMessage, request_data: dict):
Reviewer comment (Member): The streaming implementation awaits the full response first, then splits it into words and sends them one by one with artificial delays. This defeats the purpose of streaming.

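A hedged sketch of incremental streaming as the reviewer intends; the async iterator (astream) and the shape of its partial results are assumptions here, since this excerpt does not show a token-level streaming API on ChatAgent:

```python
async def _stream_response(message: BaseMessage, request_data: dict):
    created = int(time.time())
    model = request_data.get("model", "camel-model")
    # Hypothetical: forward partial content as the model produces it,
    # instead of awaiting the full reply and re-chunking it with delays.
    async for partial in self.astream(message):  # astream is an assumed API
        delta = partial.msgs[0].content if partial.msgs else ""
        chunk = {
            'id': f'chatcmpl-{created}',
            'object': 'chat.completion.chunk',
            'created': created,
            'model': model,
            'choices': [
                {'index': 0, 'delta': {'content': delta}, 'finish_reason': None}
            ],
        }
        yield f"data: {json.dumps(chunk)}\n\n"
    final = {'choices': [{'index': 0, 'delta': {}, 'finish_reason': 'stop'}]}
    yield f"data: {json.dumps(final)}\n\n"
    yield "data: [DONE]\n\n"
```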
# Start a separate task for the agent processing
agent_response = await self.astep(message)

if not agent_response.msgs:
# Stream an error message if no response
error_data = {'error': 'No response generated'}
yield f"data: {json.dumps(error_data)}\n\n"
return

content = agent_response.msgs[0].content
# This provides a good streaming experience without complex
# token handling
words = content.split()

# Send the first event with model info
first_chunk = {
'id': f'chatcmpl-{int(time.time())}',
'object': 'chat.completion.chunk',
'created': int(time.time()),
'model': request_data.get("model", "camel-model"),
'choices': [
{
'index': 0,
'delta': {'role': 'assistant'},
'finish_reason': None,
}
],
}
yield f"data: {json.dumps(first_chunk)}\n\n"

# Stream the content word by word
for i, word in enumerate(words):
# Add space before each word except the first
word_content = word if i == 0 else f" {word}"
word_chunk = {
'choices': [
{
'index': 0,
'delta': {'content': word_content},
'finish_reason': None,
}
]
}
yield f"data: {json.dumps(word_chunk)}\n\n"
await asyncio.sleep(0.05) # Reasonable streaming speed

# Send the final event
final_chunk = {
'choices': [{'index': 0, 'delta': {}, 'finish_reason': 'stop'}]
}
yield f"data: {json.dumps(final_chunk)}\n\n"
yield "data: [DONE]\n\n"

return app
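For reference, a hedged client-side usage sketch against the server from the docstring example (assumes it is running locally on port 8000; the official openai Python client accepts a base_url override):

```python
from openai import OpenAI

# Point a standard OpenAI client at the CAMEL-backed server.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")

resp = client.chat.completions.create(
    model="camel-model",
    messages=[{"role": "user", "content": "Hello"}],
)
print(resp.choices[0].message.content)
```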
20 changes: 18 additions & 2 deletions camel/messages/func_message.py
@@ -13,7 +13,11 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import json
from dataclasses import dataclass
from typing import Any, Dict, Optional
from typing import (
Any,
Dict,
Optional,
)

from camel.messages import (
BaseMessage,
@@ -30,7 +34,7 @@
from camel.messages.conversion.sharegpt.function_call_formatter import (
FunctionCallFormatter,
)
from camel.types import OpenAIBackendRole
from camel.types import OpenAIBackendRole, RoleType


@dataclass
@@ -58,6 +62,18 @@ class FunctionCallingMessage(BaseMessage):
tool_call_id: Optional[str] = None
mask_output: Optional[bool] = False

@classmethod
def make_tool_message(
cls, role_name: str, content: str, tool_call_id: str
) -> "FunctionCallingMessage":
r"""Create a tool message carrying the result of a tool call.
Args:
role_name (str): Name of the tool role.
content (str): The content returned by the tool.
tool_call_id (str): The ID of the tool call this message responds to.
"""
return cls(
role_name=role_name,
role_type=RoleType.TOOL,
meta_dict=None,
content=content,
tool_call_id=tool_call_id,
)

def to_openai_message(
self,
role_at_backend: OpenAIBackendRole,