diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 93774568d6..3e41d19ab8 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -1957,6 +1957,26 @@ def append_to_system_message( if reset_memory: self.init_messages() + def reset_system_message( + self, content: str, reset_memory: bool = True + ) -> None: + """Reset context to new system message. + + Args: + content (str): The new system message. + reset_memory (bool): + Whether to reinitialize conversation messages after appending + additional context. Defaults to True. + """ + self._original_system_message = BaseMessage.make_system_message( + content + ) + self._system_message = ( + self._generate_system_message_for_output_language() + ) + if reset_memory: + self.init_messages() + def reset_to_original_system_message(self) -> None: r"""Reset system message to original, removing any appended context. @@ -5208,3 +5228,265 @@ def get_available_tools(): mcp_server.tool()(get_available_tools) return mcp_server + + @dependencies_required("fastapi") + def to_openai_compatible_server(self) -> Any: + r"""Create an OpenAI-compatible FastAPI server for this ChatAgent. + + Returns: + FastAPI: A FastAPI application that can be served to provide + OpenAI-compatible API endpoints for this ChatAgent. + + Example: + ```python + agent = ChatAgent(model="gpt-4") + app = agent.to_openai_compatible_server() + + # Serve with uvicorn + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) + ``` + """ + import asyncio + import json + import time + + from fastapi import FastAPI + from fastapi.responses import JSONResponse, StreamingResponse + from pydantic import BaseModel + + # Define Pydantic models for request/response + class ChatMessage(BaseModel): + role: str + content: str = "" + name: Optional[str] = None + tool_calls: Optional[List[Dict[str, Any]]] = None + + app = FastAPI( + title="CAMEL OpenAI-compatible API", + description="OpenAI-compatible API for CAMEL ChatAgent", + ) + + @app.post("/v1/chat/completions") + async def chat_completions(request_data: dict): + try: + print("\n" + "=" * 80) + print(f"[{time.strftime('%H:%M:%S')}] πŸ“¨ Received Request:") + print(json.dumps(request_data, indent=2, ensure_ascii=False)) + print("=" * 80) + + messages = request_data.get("messages", []) + model = request_data.get("model", "camel-model") + stream = request_data.get("stream", False) + functions = request_data.get("functions") + tools = request_data.get("tools") + + # Convert OpenAI messages to CAMEL format and record in memory + current_user_message = None + for msg in messages: + msg_role = msg.get("role", "") + msg_content = msg.get("content", "") + + if msg_role == "user": + user_msg = BaseMessage.make_user_message( + role_name="User", content=msg_content + ) + # Record all but the last user message in memory + # The last user message will be passed to step() + if current_user_message is not None: + self.update_memory( + current_user_message, OpenAIBackendRole.USER + ) + current_user_message = user_msg + elif msg_role == "system": + sys_msg = BaseMessage.make_system_message( + role_name="System", content=msg_content + ) + self.update_memory(sys_msg, OpenAIBackendRole.SYSTEM) + self.reset_system_message(msg_content, True) + elif msg_role == "assistant": + # Record previous assistant messages + assistant_msg = BaseMessage.make_assistant_message( + role_name="Assistant", content=msg_content + ) + self.update_memory( + assistant_msg, OpenAIBackendRole.ASSISTANT + ) + elif msg_role == "tool": + # Handle tool 
response messages if needed + tool_call_id = msg.get("tool_call_id", "") + tool_msg = FunctionCallingMessage.make_tool_message( + role_name="Tool", + content=msg_content, + tool_call_id=tool_call_id, + ) + self.update_memory(tool_msg, OpenAIBackendRole.TOOL) + + # Process tools/functions if provided + if tools or functions: + tools_to_use = tools if tools else functions + # Type guard to ensure tools_to_use is not None + if tools_to_use is not None: + for tool in tools_to_use: + self.add_external_tool(tool) + + # Get the response from the agent + if current_user_message is not None: + if stream: + return StreamingResponse( + _stream_response( + current_user_message, request_data + ), + media_type="text/event-stream", + ) + else: + agent_response = await self.astep(current_user_message) + + print(f"agent_response.info {agent_response.info}") + print(f"agent_response.msgs {agent_response.msgs}") + + # Convert CAMEL response to OpenAI format + if not agent_response.msgs: + # Empty response or error + content = "No response generated" + finish_reason = "error" + else: + content = agent_response.msgs[0].content + finish_reason = "stop" + + # Check for tool calls + tool_calls_response = None + external_tool_requests = agent_response.info.get( + "external_tool_call_requests" + ) + if external_tool_requests: + tool_calls_response = [] + for tool_call in external_tool_requests: + tool_calls_response.append( + { + "id": ( + tool_call.tool_call_id + or f"call_{int(time.time())}" + ), + "type": "function", + "function": { + "name": tool_call.tool_name, + "arguments": json.dumps( + tool_call.args + ), + }, + } + ) + finish_reason = "tool_calls" + + usage = agent_response.info.get("usage") or { + "prompt_tokens": agent_response.info.get( + "prompt_tokens", 0 + ), + "completion_tokens": agent_response.info.get( + "completion_tokens", 0 + ), + "total_tokens": agent_response.info.get( + "total_tokens", 0 + ), + } + + response = { + "id": agent_response.info.get( + "id", f"chatcmpl-{int(time.time())}" + ), + "object": "chat.completion", + "created": int(time.time()), + "model": model, + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": ( + content + if not tool_calls_response + else None + ), + "tool_calls": tool_calls_response, + }, + "finish_reason": finish_reason, + } + ], + "usage": usage, + } + + print(f"[{time.strftime('%H:%M:%S')}] πŸ’¬ Response:") + print( + json.dumps(response, indent=2, ensure_ascii=False) + ) + print("=" * 80 + "\n") + + return response + else: + # No user message provided + return JSONResponse( + status_code=400, + content={"error": "No user message provided"}, + ) + except Exception as e: + return JSONResponse( + status_code=500, + content={"error": f"Internal server error: {e!s}"}, + ) + + async def _stream_response(message: BaseMessage, request_data: dict): + # Start a separate task for the agent processing + agent_response = await self.astep(message) + + if not agent_response.msgs: + # Stream an error message if no response + error_data = {'error': 'No response generated'} + yield f"data: {json.dumps(error_data)}\n\n" + return + + content = agent_response.msgs[0].content + # This provides a good streaming experience without complex + # token handling + words = content.split() + + # Send the first event with model info + first_chunk = { + 'id': f'chatcmpl-{int(time.time())}', + 'object': 'chat.completion.chunk', + 'created': int(time.time()), + 'model': request_data.get("model", "camel-model"), + 'choices': [ + { + 'index': 0, + 'delta': 
{'role': 'assistant'}, + 'finish_reason': None, + } + ], + } + yield f"data: {json.dumps(first_chunk)}\n\n" + + # Stream the content word by word + for i, word in enumerate(words): + # Add space before each word except the first + word_content = word if i == 0 else f" {word}" + word_chunk = { + 'choices': [ + { + 'index': 0, + 'delta': {'content': word_content}, + 'finish_reason': None, + } + ] + } + yield f"data: {json.dumps(word_chunk)}\n\n" + await asyncio.sleep(0.05) # Reasonable streaming speed + + # Send the final event + final_chunk = { + 'choices': [{'index': 0, 'delta': {}, 'finish_reason': 'stop'}] + } + yield f"data: {json.dumps(final_chunk)}\n\n" + yield "data: [DONE]\n\n" + + return app diff --git a/camel/messages/func_message.py b/camel/messages/func_message.py index 2767e1c0cc..cd642c6803 100644 --- a/camel/messages/func_message.py +++ b/camel/messages/func_message.py @@ -13,7 +13,11 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import json from dataclasses import dataclass -from typing import Any, Dict, Optional +from typing import ( + Any, + Dict, + Optional, +) from camel.messages import ( BaseMessage, @@ -30,7 +34,7 @@ from camel.messages.conversion.sharegpt.function_call_formatter import ( FunctionCallFormatter, ) -from camel.types import OpenAIBackendRole +from camel.types import OpenAIBackendRole, RoleType @dataclass @@ -58,6 +62,18 @@ class FunctionCallingMessage(BaseMessage): tool_call_id: Optional[str] = None mask_output: Optional[bool] = False + @classmethod + def make_tool_message( + cls, role_name: str, content: str, tool_call_id: str + ) -> "FunctionCallingMessage": + return cls( + role_name=role_name, + role_type=RoleType.TOOL, + meta_dict=None, + content=content, + tool_call_id=tool_call_id, + ) + def to_openai_message( self, role_at_backend: OpenAIBackendRole, diff --git a/camel/schemas/openai_schema.py b/camel/schemas/openai_schema.py new file mode 100644 index 0000000000..4cfb65d120 --- /dev/null +++ b/camel/schemas/openai_schema.py @@ -0,0 +1,94 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= + + +# examples/agents/openai-server/openai_schema.py +from typing import Any, Dict, List, Literal, Optional, Union + +from pydantic import BaseModel + + +class ChatCompletionMessage(BaseModel): + role: str + content: Optional[str] = None + name: Optional[str] = None + refusal: Optional[str] = None + tool_calls: Optional[List[Dict[str, Any]]] = None + tool_call_id: Optional[str] = None + + +class ChatCompletionRequest(BaseModel): + model: str + messages: List[ChatCompletionMessage] + audio: Optional[Dict[str, Any]] = None + frequency_penalty: Optional[float] = None + function_call: Optional[Dict[str, Any]] = None + functions: Optional[List[Dict[str, Any]]] = None + logit_bias: Optional[Dict[str, int]] = None + logprobs: Optional[bool] = None + max_completion_tokens: Optional[int] = None + max_tokens: Optional[int] = None + metadata: Optional[Dict[str, Any]] = None + modalities: Optional[List[Literal["text", "audio"]]] = None + n: Optional[int] = None + parallel_tool_calls: Optional[bool] = None + prediction: Optional[Dict[str, Any]] = None + presence_penalty: Optional[float] = None + prompt_cache_key: Optional[str] = None + reasoning_effort: Optional[str] = None + response_format: Optional[Dict[str, Any]] = None + safety_identifier: Optional[str] = None + seed: Optional[int] = None + service_tier: Optional[ + Literal["auto", "default", "flex", "scale", "priority"] + ] = None + stop: Optional[Union[str, List[str]]] = None + store: Optional[bool] = None + stream: Optional[bool] = False + stream_options: Optional[Dict[str, Any]] = None + temperature: Optional[float] = 1.0 + tool_choice: Optional[Any] = None + tools: Optional[List[Dict[str, Any]]] = None + top_logprobs: Optional[int] = None + top_p: Optional[float] = 1.0 + user: Optional[str] = None + verbosity: Optional[Literal["low", "medium", "high"]] = None + web_search_options: Optional[Dict[str, Any]] = None + + +class ChatCompletionChoice(BaseModel): + index: int + message: ChatCompletionMessage + finish_reason: Literal[ + "stop", "length", "tool_calls", "content_filter", "function_call" + ] + + +class ChatCompletionUsage(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + +class ChatCompletionResponse(BaseModel): + id: str + object: Literal["chat.completion"] = "chat.completion" + created: int + model: str + choices: List[ChatCompletionChoice] + service_tier: Optional[ + Literal["auto", "default", "flex", "scale", "priority"] + ] = None + system_fingerprint: Optional[str] = None + usage: Optional[ChatCompletionUsage] = None diff --git a/camel/types/enums.py b/camel/types/enums.py index dcba68cf53..1c5feed1f2 100644 --- a/camel/types/enums.py +++ b/camel/types/enums.py @@ -25,6 +25,7 @@ class RoleType(Enum): ASSISTANT = "assistant" USER = "user" SYSTEM = "system" + TOOL = "tool" CRITIC = "critic" EMBODIMENT = "embodiment" DEFAULT = "default" diff --git a/examples/agents/openai-server/README_OPENAI_SERVER.md b/examples/agents/openai-server/README_OPENAI_SERVER.md new file mode 100644 index 0000000000..27424873bb --- /dev/null +++ b/examples/agents/openai-server/README_OPENAI_SERVER.md @@ -0,0 +1,278 @@ +# OpenAI-Compatible Server for CAMEL ChatAgent + +The `ChatAgent` class now includes a `to_openai_compatible_server()` method that creates a FastAPI application with OpenAI-compatible endpoints. This allows you to serve your CAMEL ChatAgent as a drop-in replacement for OpenAI's API. 
+ +## Features + +- **OpenAI-Compatible API**: Supports the `/v1/chat/completions` endpoint +- **Streaming Support**: Supports both regular and streaming responses +- **Tool/Function Calling**: Supports OpenAI-style function calling +- **Easy Integration**: Drop-in replacement for OpenAI API clients + +## Installation + +Make sure you have the required dependencies: + +```bash +pip install fastapi uvicorn +``` + +## Basic Usage + +### Method 1: Direct Server Creation + +```python +from camel.agents import ChatAgent +import uvicorn + +# Create a ChatAgent +agent = ChatAgent( + system_message="You are a helpful assistant.", + model="gpt-4o-mini" # or any supported model +) + +# Create the FastAPI server +app = agent.to_openai_compatible_server() + +# Serve the application +uvicorn.run(app, host="0.0.0.0", port=8000) +``` + +### Method 2: Using the Example Script + +```bash +# Run the example script +python example_openai_server.py + +# Or use uvicorn command line +uvicorn example_openai_server:app --host 0.0.0.0 --port 8000 +``` + +## Testing the Server + +### Quick Test with Simple Client + +Use the provided simple client example: + +```bash +# Start the server (in one terminal) +python example_openai_server.py + +# Test the server (in another terminal) +python simple_client_example.py +``` + +### Comprehensive Testing + +For more thorough testing including function calling and error handling: + +```bash +# Start the server (in one terminal) +python example_openai_server.py + +# Run comprehensive tests (in another terminal) +python example_client.py +``` + +The comprehensive client test will check: +- βœ… Basic chat completion +- βœ… System message handling +- βœ… Streaming responses +- βœ… Multi-turn conversations +- βœ… Function calling +- βœ… Error handling + +### cURL Examples + +You can also test the server using cURL commands: + +**Basic Chat:** +```bash +curl -X POST "http://localhost:8000/v1/chat/completions" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer dummy-key" \ + -d '{ + "model": "camel-model", + "messages": [ + {"role": "user", "content": "Hello! 
How are you?"} + ] + }' +``` + +**With System Message:** +```bash +curl -X POST "http://localhost:8000/v1/chat/completions" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer dummy-key" \ + -d '{ + "model": "camel-model", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Explain quantum physics simply"} + ] + }' +``` + +**Streaming Response:** +```bash +curl -X POST "http://localhost:8000/v1/chat/completions" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer dummy-key" \ + -d '{ + "model": "camel-model", + "messages": [ + {"role": "user", "content": "Tell me a story"} + ], + "stream": true + }' +``` + +## Using the API + +Once the server is running, you can use it just like the OpenAI API: + +### With OpenAI Python Client + +```python +import openai + +client = openai.OpenAI( + base_url="http://localhost:8000/v1", + api_key="dummy-key" # Any string works +) + +response = client.chat.completions.create( + model="camel-model", + messages=[ + {"role": "user", "content": "Hello, how are you?"} + ] +) + +print(response.choices[0].message.content) +``` + +### With cURL + +```bash +curl -X POST "http://localhost:8000/v1/chat/completions" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer dummy-key" \ + -d '{ + "model": "camel-model", + "messages": [ + {"role": "user", "content": "Hello, how are you?"} + ] + }' +``` + +### Streaming Responses + +```python +response = client.chat.completions.create( + model="camel-model", + messages=[ + {"role": "user", "content": "Tell me a story"} + ], + stream=True +) + +for chunk in response: + if chunk.choices[0].delta.content: + print(chunk.choices[0].delta.content, end="") +``` + +## Tool/Function Calling + +The server supports OpenAI-style function calling: + +```python +# Define a function +functions = [ + { + "name": "get_weather", + "description": "Get the current weather", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state" + } + }, + "required": ["location"] + } + } +] + +response = client.chat.completions.create( + model="camel-model", + messages=[ + {"role": "user", "content": "What's the weather like in New York?"} + ], + functions=functions +) + +# Handle function calls +if response.choices[0].message.function_call: + function_name = response.choices[0].message.function_call.name + function_args = response.choices[0].message.function_call.arguments + # Process the function call... +``` + +## Advanced Configuration + +You can customize the ChatAgent before creating the server: + +```python +from camel.agents import ChatAgent +from camel.toolkits import FunctionTool + +# Create a custom tool +def my_custom_tool(query: str) -> str: + """A custom tool for the agent.""" + return f"Processed: {query}" + +# Create agent with custom configuration +agent = ChatAgent( + system_message="You are a specialized assistant.", + model="gpt-4o", + tools=[my_custom_tool], + temperature=0.8, + max_tokens=1000 +) + +# Create and serve the API +app = agent.to_openai_compatible_server() +``` + +## Error Handling + +The server includes proper error handling and returns appropriate HTTP status codes and error messages compatible with the OpenAI API format. 
+ +## Limitations + +- The server currently supports the `/v1/chat/completions` endpoint +- Some advanced OpenAI features may not be fully supported +- Function calling is supported for external tools (tools that return requests rather than executing directly) + +## Troubleshooting + +### Import Errors + +If you get import errors for FastAPI or uvicorn: + +```bash +pip install fastapi uvicorn +``` + +### Port Already in Use + +If port 8000 is already in use, specify a different port: + +```python +uvicorn.run(app, host="0.0.0.0", port=8080) +``` + +### Model Configuration + +Make sure your ChatAgent is properly configured with a valid model before creating the server. The model should be available and properly authenticated if using external APIs. \ No newline at end of file diff --git a/examples/agents/openai-server/example_openai_server.py b/examples/agents/openai-server/example_openai_server.py new file mode 100644 index 0000000000..12f9a678ce --- /dev/null +++ b/examples/agents/openai-server/example_openai_server.py @@ -0,0 +1,68 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +#!/usr/bin/env python3 +""" +Example script demonstrating the to_openai_compatible_server() method. + +This shows how to create an OpenAI-compatible server from a ChatAgent +and serve it with uvicorn. +""" + +from camel.agents import ChatAgent + + +def main(): + # Create a ChatAgent instance + agent = ChatAgent( + system_message="You are a helpful assistant.", + # You can specify any model supported by CAMEL + # model="gpt-4o-mini", # Uncomment to use a specific model + ) + + # Create the FastAPI server + app = agent.to_openai_compatible_server() + + # You can now serve the app with uvicorn + print("Starting OpenAI-compatible server on http://localhost:8000") + print("API endpoint: http://localhost:8000/v1/chat/completions") + print("Press Ctrl+C to stop the server") + + try: + import uvicorn + + uvicorn.run(app, host="0.0.0.0", port=8000) + except ImportError: + print("\nError: uvicorn not installed.") + print("Install it with: pip install uvicorn") + print( + "Then run: uvicorn example_openai_server:app " + "--host 0.0.0.0 --port 8000" + ) + + # Return the app so it can be imported and used with uvicorn command + return app + + return app + + +# For uvicorn command line usage: +# uvicorn example_openai_server:app --host 0.0.0.0 --port 8000 +app = None + +if __name__ == "__main__": + app = main() +else: + # If imported, create the app + agent = ChatAgent(system_message="You are a helpful assistant.") + app = agent.to_openai_compatible_server() diff --git a/examples/agents/openai-server/simple_client_example.py b/examples/agents/openai-server/simple_client_example.py new file mode 100644 index 0000000000..1d44c4dd05 --- /dev/null +++ b/examples/agents/openai-server/simple_client_example.py @@ -0,0 +1,169 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. 
All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +""" +Simple client example for CAMEL OpenAI-compatible server. + +Start the server first: + python example_openai_server.py + +Then run this client: + python simple_client_example.py +""" + +import openai + +# Configure client to use CAMEL server +client = openai.OpenAI( + base_url="http://localhost:8000/v1", + api_key="dummy-key", # Any string works +) + + +def main(): + print("πŸš€ Testing CAMEL OpenAI-compatible server...") + + # Basic chat + print("\n1. Basic Chat:") + response = client.chat.completions.create( + model="camel-model", + messages=[{"role": "user", "content": "Hello! Tell me a fun fact."}], + ) + print(f"Response: {response.choices[0].message.content}") + + # With system message + print("\n2. With System Message:") + response = client.chat.completions.create( + model="camel-model", + messages=[ + {"role": "system", "content": "You are a helpful math tutor."}, + {"role": "user", "content": "Explain what is 2+2 in a fun way."}, + ], + ) + print(f"Response: {response.choices[0].message.content}") + + # Streaming + print("\n3. Streaming Response:") + response = client.chat.completions.create( + model="camel-model", + messages=[{"role": "user", "content": "Count from 1 to 5 slowly."}], + stream=True, + ) + + print("Streaming: ", end="") + for chunk in response: + if chunk.choices[0].delta.content: + print(chunk.choices[0].delta.content, end="", flush=True) + print() + + # Tool calling example + print("\n4. 
Tool Calling:") + + # Define a simple calculator tool + calculator_tool = { + "type": "function", + "function": { + "name": "calculate", + "description": "Perform basic arithmetic operations", + "parameters": { + "type": "object", + "properties": { + "operation": { + "type": "string", + "enum": ["add", "subtract", "multiply", "divide"], + "description": "The arithmetic operation to perform", + }, + "a": {"type": "number", "description": "First number"}, + "b": {"type": "number", "description": "Second number"}, + }, + "required": ["operation", "a", "b"], + }, + }, + } + + response = client.chat.completions.create( + model="camel-model", + messages=[ + { + "role": "user", + "content": "What is 15 multiplied by 8?Use calculator tool.", + } + ], + tools=[calculator_tool], + ) + + # Check if the model wants to call a tool + if response.choices[0].message.tool_calls: + tool_call = response.choices[0].message.tool_calls[0] + print(f"Tool called: {tool_call.function.name}") + print(f"Arguments: {tool_call.function.arguments}") + + # In a real implementation, you would execute the tool here + # For this example, we'll just show what would happen + import json + + args = json.loads(tool_call.function.arguments) + if args["operation"] == "multiply": + result = args["a"] * args["b"] + print(f"Tool result: {result}") + else: + print(f"Response: {response.choices[0].message.content}") + + print("\nβœ… All tests completed!") + + +if __name__ == "__main__": + main() + +""" +πŸš€ Testing CAMEL OpenAI-compatible server... + +1. Basic Chat: +Response: Sure! Did you know that honey never spoils? Archaeologists have +found pots of honey in ancient Egyptian tombs that are over 3,000 years old +and still perfectly edible! Honey's long shelf life is due to its low moisture +content and acidic pH, which create an inhospitable environment for bacteria +and microorganisms. + +2. With System Message: +Response: Sure! Imagine you have two playful puppies. One is wagging its tail +on the left, and the other is bouncing around on the right. Now, if you add +one more puppy who jumps in to join the fun, you'll have a total of three +puppies! But wait, let's bring in one more adorable puppy who can't resist the +excitement. + +So now, how many puppies do you have? You started with two, then you added one +more (three), and added another oneβ€”ta-da! That gives you 2 + 2 = 4 happy +puppies bouncing around! + +That's how math worksβ€”like a puppy party, where you just keep adding more joy! +πŸΆπŸŽ‰ + +3. Streaming Response: +Streaming: Sure! Here we go: + +1... +2... +3... +4... +5... + +Nice and slow! That's counting from 1 to 5! + +4. Tool Calling: +Tool called: calculate +Arguments: {"operation": "multiply", "a": 15, "b": 8} +Tool result: 120 + +βœ… All tests completed! 
+""" diff --git a/pyproject.toml b/pyproject.toml index 28da857b4a..de421b9959 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,7 @@ dependencies = [ "openai>=1.86.0,<2", "websockets>=13.0,<15.1", "astor>=0.8.1", + "fastapi>=0.119.1", ] diff --git a/test/toolkits/test_openai_function.py b/test/toolkits/test_openai_function.py index eae85ca4e7..ffc14b6dce 100644 --- a/test/toolkits/test_openai_function.py +++ b/test/toolkits/test_openai_function.py @@ -105,6 +105,7 @@ def test_all_parameters( 'assistant', 'user', 'system', + 'tool', 'critic', 'embodiment', 'default', @@ -128,6 +129,7 @@ def test_all_parameters( 'assistant', 'user', 'system', + 'tool', 'critic', 'embodiment', 'default', diff --git a/uv.lock b/uv.lock index 9c5d1fdfa7..1a0296d58e 100644 --- a/uv.lock +++ b/uv.lock @@ -796,6 +796,7 @@ dependencies = [ { name = "astor" }, { name = "colorama" }, { name = "docstring-parser" }, + { name = "fastapi" }, { name = "httpx" }, { name = "jsonschema" }, { name = "mcp" }, @@ -1292,6 +1293,7 @@ requires-dist = [ { name = "faiss-cpu", marker = "extra == 'all'", specifier = ">=1.7.2,<2" }, { name = "faiss-cpu", marker = "extra == 'rag'", specifier = ">=1.7.2,<2" }, { name = "faiss-cpu", marker = "extra == 'storage'", specifier = ">=1.7.2,<2" }, + { name = "fastapi", specifier = ">=0.119.1" }, { name = "fastapi", marker = "extra == 'all'", specifier = ">=0.115.11" }, { name = "fastapi", marker = "extra == 'web-tools'", specifier = ">=0.115.11" }, { name = "ffmpeg-python", marker = "extra == 'all'", specifier = ">=0.2.0,<0.3" },