diff --git a/README.md b/README.md
index 9311b477a3..b67eaff562 100644
--- a/README.md
+++ b/README.md
@@ -118,6 +118,50 @@ response = client.responses.create(
     ],
 )
 ```
+### Updated Response API Pattern
+#### Important: Function calling differences
+
+**⚠️ Breaking Change:** The Responses API uses a different function calling pattern than Chat Completions.
+
+**Chat Completions pattern (❌ Not supported in Responses API):**
+
+```python
+# This DOES NOT work with Responses API
+messages = [
+    {"role": "assistant", "tool_calls": [...]},  # ❌ Raises error
+    {"role": "tool", "content": "...", "tool_call_id": "..."}  # ❌ Raises error
+]
+```
+
+**Responses API pattern (✅ Correct approach):**
+
+```python
+from openai import OpenAI
+
+client = OpenAI()
+
+# Initial request
+response = client.responses.create(
+    model="gpt-4o",
+    tools=[{"type": "function", "name": "get_weather", ...}],
+    input=[{"role": "user", "content": "What's the weather?"}],
+)
+
+# Append entire output to input
+input_messages = response.output
+
+# Execute function calls
+for item in response.output:
+    if item.type == "function_call":
+        result = execute_function(item.name, item.arguments)
+        input_messages.append({
+            "type": "function_call_output",  # Use 'type', not 'role'
+            "call_id": item.call_id,
+            "output": result,
+        })
+
+# Get final response
+final = client.responses.create(model="gpt-4o", tools=tools, input=input_messages)
+```
+
+See `examples/responses/function_calling_migration.py` for a complete migration guide.
 
 ## Async usage
diff --git a/docs/RESPONSES_MIGRATION.md b/docs/RESPONSES_MIGRATION.md
new file mode 100644
index 0000000000..6cab414b32
--- /dev/null
+++ b/docs/RESPONSES_MIGRATION.md
@@ -0,0 +1,132 @@
+# Migrating Function Calling from Chat Completions to Responses API
+
+## Overview
+
+The Responses API (`/v1/responses`) uses a fundamentally different approach to function calling compared to the Chat Completions API (`/v1/chat/completions`). This guide helps you migrate existing code.
+
+## Issue Reference
+
+- GitHub Issue: [#2677](https://github.com/openai/openai-python/issues/2677)
+- API Documentation: [Function Calling Guide](https://platform.openai.com/docs/guides/function-calling?api-mode=responses)
+
+## Key Differences
+
+| Aspect | Chat Completions | Responses API |
+|--------|-----------------|---------------|
+| **Function requests from model** | `role: "assistant"` with `tool_calls` | `type: "function_call"` items in output |
+| **Function results to model** | `role: "tool"` messages | `type: "function_call_output"` items |
+| **Conversation management** | Manual message construction | Append entire `response.output` |
+| **Tool role support** | ✅ Supported | ❌ Not supported (raises error) |
+| **Assistant tool_calls in input** | ✅ Supported | ❌ Not supported (raises error) |
+
+## Error Messages You Might See
+
+```json
+{
+  "error": {
+    "message": "Unknown parameter: 'input.tool_calls'.",
+    "type": "invalid_request_error"
+  }
+}
+```
+
+```json
+{
+  "error": {
+    "message": "Invalid value: 'tool'. Supported values are: 'assistant', 'system', 'developer', and 'user'.",
+    "type": "invalid_request_error"
+  }
+}
+```
+
+These errors indicate you're using Chat Completions patterns in the Responses API.
+
+## Migration Steps
+
+### Step 1: Remove Manual Tool Message Construction
+
+**Before (Chat Completions):**
+
+```python
+messages = [
+    {"role": "user", "content": "What's the weather?"},
+    {"role": "assistant", "tool_calls": [{"id": "call_123", ...}]},
+    {"role": "tool", "content": '{"temp": 20}', "tool_call_id": "call_123"}
+]
+```
+
+**After (Responses API):**
+
+```python
+input_messages = [
+    {"role": "user", "content": "What's the weather?"}
+]
+
+response = client.responses.create(model="gpt-4o", tools=tools, input=input_messages)
+input_messages.extend(response.output)  # Append entire output
+```
+
+### Step 2: Update Function Output Format
+
+**Before:**
+
+```python
+messages.append({
+    "role": "tool",
+    "tool_call_id": tool_call.id,
+    "content": json.dumps(result)
+})
+```
+
+**After:**
+
+```python
+input_messages.append({
+    "type": "function_call_output",
+    "call_id": item.call_id,
+    "output": json.dumps(result)
+})
+```
+
+### Step 3: Update Function Call Detection
+
+**Before:**
+
+```python
+if response_message.tool_calls:
+    for tool_call in response_message.tool_calls:
+        # Execute function
+```
+
+**After:**
+
+```python
+for item in response.output:
+    if item.type == "function_call":
+        # Execute function
+```
+
+## Complete Example
+
+See [`examples/responses/function_calling_migration.py`](../examples/responses/function_calling_migration.py) for a fully working example.
+
+## Alternative: Use Built-in Tools
+
+For common tasks, consider using OpenAI's built-in tools instead of custom functions:
+
+```python
+response = client.responses.create(
+    model="gpt-4o",
+    input="Search for latest AI news",
+    tools=[{"type": "web_search_preview"}]  # No manual loop needed
+)
+
+print(response.output_text)  # Automatically includes search results
+```
+
+## When to Use Each API
+
+- **Use Chat Completions** if you need the traditional conversation format or are integrating with existing tooling
+- **Use Responses API** for new projects, especially those using reasoning models or built-in tools
+
+Both APIs are supported indefinitely.
+
+## Additional Resources
+
+- [Official Migration Guide](https://platform.openai.com/docs/guides/responses-vs-chat-completions)
+- [Function Calling Documentation](https://platform.openai.com/docs/guides/function-calling?api-mode=responses)
+- [Example Code](../examples/responses/)
\ No newline at end of file
diff --git a/examples/responses/README.md b/examples/responses/README.md
new file mode 100644
index 0000000000..0a9f540df9
--- /dev/null
+++ b/examples/responses/README.md
@@ -0,0 +1,27 @@
+# Responses API Examples
+
+Examples demonstrating the Responses API functionality.
+
+## Files
+
+- **`function_calling_migration.py`** - Complete guide for migrating from Chat Completions tool calling to Responses API function calling. Addresses [Issue #2677](https://github.com/openai/openai-python/issues/2677).
+
+## Key Differences from Chat Completions
+
+The Responses API does **NOT** support:
+- `role: "assistant"` messages with `tool_calls` in input
+- `role: "tool"` messages
+
+Instead, use:
+- `type: "function_call"` items (from model output)
+- `type: "function_call_output"` items (your function results)
+
+See `function_calling_migration.py` for detailed examples.
+
+## Running Examples
+
+```bash
+export OPENAI_API_KEY="your-api-key"
+python examples/responses/function_calling_migration.py
+```
\ No newline at end of file
diff --git a/examples/responses/function_calling_migration.py b/examples/responses/function_calling_migration.py
new file mode 100644
index 0000000000..7d073e34b1
--- /dev/null
+++ b/examples/responses/function_calling_migration.py
@@ -0,0 +1,261 @@
+"""
+Migration guide: Chat Completions tool calls → Responses API function calls
+
+This example shows how to migrate from the Chat Completions API's tool calling
+pattern (using 'assistant' role with 'tool_calls' and 'tool' role) to the
+Responses API's function calling pattern (using typed items).
+ +Related issue: https://github.com/openai/openai-python/issues/2677 +""" + +from openai import OpenAI +import json +import os + +client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) + +# ============================================================================ +# OLD PATTERN (Chat Completions) - NO LONGER WORKS IN RESPONSES API +# ============================================================================ +""" +# ❌ This pattern DOES NOT work in Responses API: +messages = [ + {"role": "developer", "content": "You are a helpful assistant"}, + {"role": "user", "content": "What's the weather in Paris?"}, + {"role": "assistant", "tool_calls": [...]}, # ❌ NOT SUPPORTED + {"role": "tool", "content": "...", "tool_call_id": "..."} # ❌ NOT SUPPORTED +] +response = client.responses.create(model="gpt-4o", input=messages) +# Will raise: "Unknown parameter: 'input[X].tool_calls'" +# Will raise: "Invalid value: 'tool'. Supported values are: 'assistant', 'system', 'developer', and 'user'." +""" + +# ============================================================================ +# NEW PATTERN (Responses API) - CORRECT APPROACH +# ============================================================================ + +# Define your custom functions +tools = [ + { + "type": "function", + "name": "get_weather", + "description": "Get current weather for a location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "City name, e.g., Paris, Tokyo" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"], + "description": "Temperature unit" + } + }, + "required": ["location"] + } + } +] + + +def get_weather(location: str, unit: str = "celsius") -> dict: + """Mock weather function - replace with actual API call""" + mock_data = { + "paris": {"temp": 18, "condition": "Partly cloudy"}, + "tokyo": {"temp": 24, "condition": "Sunny"}, + "london": {"temp": 12, "condition": "Rainy"} + } + + data = 
mock_data.get(location.lower(), {"temp": 20, "condition": "Unknown"}) + return { + "location": location, + "temperature": data["temp"], + "unit": unit, + "condition": data["condition"] + } + + +def execute_function_call(function_name: str, arguments: str) -> str: + """Execute the function and return JSON result""" + args = json.loads(arguments) + + if function_name == "get_weather": + result = get_weather(**args) + else: + result = {"error": f"Unknown function: {function_name}"} + + return json.dumps(result) + + +def run_conversation_with_tools(): + """ + Correct pattern for using function calls in Responses API. + + Key differences from Chat Completions: + 1. Don't manually add 'assistant' messages with 'tool_calls' + 2. Don't use 'tool' role - use 'function_call_output' type instead + 3. Append entire response.output to input for continuation + """ + + # Start with user message + input_messages = [ + {"role": "user", "content": "What's the weather in Paris and Tokyo?"} + ] + + print("=" * 70) + print("STEP 1: Initial request with function definitions") + print("=" * 70) + + # Make initial request with tools + response = client.responses.create( + model="gpt-4o", + instructions="You are a helpful weather assistant", + tools=tools, + input=input_messages + ) + + print(f"Response status: {response.status}") + print(f"Output items: {len(response.output)}") + + # ✅ CRITICAL: Add entire output to input (not just tool_calls) + input_messages.extend(response.output) + + # Check for function calls in output + function_calls = [item for item in response.output if item.type == "function_call"] + + if function_calls: + print(f"\nFound {len(function_calls)} function call(s)") + + print("\n" + "=" * 70) + print("STEP 2: Execute functions and add outputs") + print("=" * 70) + + # Execute each function call + for fc in function_calls: + print(f"\n Executing: {fc.name}({fc.arguments})") + result = execute_function_call(fc.name, fc.arguments) + print(f" Result: {result}") + + # ✅ 
Add function output as typed item (NOT 'tool' role) + input_messages.append({ + "type": "function_call_output", # Use 'type', not 'role' + "call_id": fc.call_id, + "output": result + }) + + print("\n" + "=" * 70) + print("STEP 3: Get final response with function results") + print("=" * 70) + + # Request final response + final_response = client.responses.create( + model="gpt-4o", + instructions="You are a helpful weather assistant", + tools=tools, + input=input_messages + ) + + print(f"\nFinal answer: {final_response.output_text}") + + else: + print(f"\nDirect response (no function calls): {response.output_text}") + + +def run_multi_turn_conversation(): + """ + Example of multi-turn conversation with function calling. + Shows how to maintain conversation state across multiple turns. + """ + print("\n\n" + "=" * 70) + print("MULTI-TURN CONVERSATION EXAMPLE") + print("=" * 70) + + input_messages = [] + + # Turn 1 + input_messages.append({"role": "user", "content": "What's the weather in Paris?"}) + + response = client.responses.create( + model="gpt-4o", + instructions="You are a helpful weather assistant", + tools=tools, + input=input_messages + ) + + input_messages.extend(response.output) + + # Execute function calls if present + for item in response.output: + if item.type == "function_call": + result = execute_function_call(item.name, item.arguments) + input_messages.append({ + "type": "function_call_output", + "call_id": item.call_id, + "output": result + }) + + # Get response after function execution + response = client.responses.create( + model="gpt-4o", + instructions="You are a helpful weather assistant", + tools=tools, + input=input_messages + ) + + print(f"Turn 1 response: {response.output_text}") + input_messages.extend(response.output) + + # Turn 2 - Follow-up question + input_messages.append({"role": "user", "content": "What about Tokyo?"}) + + response = client.responses.create( + model="gpt-4o", + instructions="You are a helpful weather assistant", + 
tools=tools, + input=input_messages + ) + + input_messages.extend(response.output) + + # Execute function calls + for item in response.output: + if item.type == "function_call": + result = execute_function_call(item.name, item.arguments) + input_messages.append({ + "type": "function_call_output", + "call_id": item.call_id, + "output": result + }) + + # Get final response + response = client.responses.create( + model="gpt-4o", + instructions="You are a helpful weather assistant", + tools=tools, + input=input_messages + ) + + print(f"Turn 2 response: {response.output_text}") + + +if __name__ == "__main__": + # Run basic example + run_conversation_with_tools() + + # Run multi-turn example + run_multi_turn_conversation() + + print("\n" + "=" * 70) + print("MIGRATION CHECKLIST:") + print("=" * 70) + print("✅ Replace: {'role': 'assistant', 'tool_calls': [...]} ") + print(" With: Append entire response.output to input") + print() + print("✅ Replace: {'role': 'tool', 'content': '...', 'tool_call_id': '...'}") + print(" With: {'type': 'function_call_output', 'call_id': '...', 'output': '...'}") + print() + print("✅ Always extend input_messages with response.output") + print() + print("✅ Check item.type == 'function_call' to detect function calls") + print("=" * 70)