
Commit a02fa1b

rushitat and cursoragent committed
Add Responses API example and documentation
Co-authored-by: Cursor <cursoragent@cursor.com>
1 parent fd535d6 commit a02fa1b

3 files changed: 86 additions, 2 deletions

README.md

Lines changed: 1 addition & 0 deletions
@@ -31,6 +31,7 @@ The Gradient SDK provides clients for:
 * DigitalOcean API
 * Gradient Serverless Inference
 * Gradient Agent Inference
+* **Responses API**: `client.responses` for structured request/response with tools (e.g. GPT 5.2 Pro, 5.1 Codex Max). See `examples/responses_tool_calling.py`.
 
 The full API of this library can be found in [api.md](api.md).
 
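As a quick illustration of the Responses API entry added above, a minimal call without tools might look like the sketch below. This is not part of the commit; it assumes GRADIENT_MODEL_ACCESS_KEY is set in the environment and reuses the `Gradient` client, `ResponsesModels` enum, and `output_text` property that appear elsewhere in this commit.

    import os

    from gradient import Gradient, ResponsesModels

    # Build the client from the model access key, as the example below does.
    client = Gradient(model_access_key=os.environ["GRADIENT_MODEL_ACCESS_KEY"])

    # Single user message, no tools: the model should answer directly.
    response = client.responses.create(
        model=ResponsesModels.GPT_5_2_PRO,
        input=[{"type": "message", "role": "user", "content": "Say hello in one sentence."}],
    )

    print(response.output_text)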

examples/responses_tool_calling.py

Lines changed: 81 additions & 0 deletions
@@ -0,0 +1,81 @@
"""
Example: Responses API with tool (function) calling

Demonstrates using client.responses.create() with a function tool: send a user
message, handle a function_call in the output, append the function result as
function_call_output, call create again, and print the final text.

Requires GRADIENT_MODEL_ACCESS_KEY in the environment (e.g. from a .env file).
"""

import os
from gradient import Gradient, ResponsesModels
from gradient.types.responses.response_create_response import ResponseOutputFunctionCall

# Load .env if available (e.g. python-dotenv)
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    pass

MODEL_ACCESS_KEY = os.environ.get("GRADIENT_MODEL_ACCESS_KEY")
if not MODEL_ACCESS_KEY:
    raise SystemExit("Set GRADIENT_MODEL_ACCESS_KEY in the environment to run this example.")

client = Gradient(model_access_key=MODEL_ACCESS_KEY)

# One function tool: get_weather
get_weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {
                "city": {"type": "string", "description": "City name"},
                "unit": {"type": "string", "enum": ["celsius", "fahrenheit"], "default": "celsius"},
            },
            "required": ["city"],
        },
    },
}

# Initial conversation: single user message
input_messages: list[dict] = [
    {"type": "message", "role": "user", "content": "What's the weather in New York?"},
]

# First call: model may return a function_call
response = client.responses.create(
    model=ResponsesModels.GPT_5_1_CODEX_MAX,
    input=input_messages,
    tools=[get_weather_tool],
    tool_choice="auto",
)

# If the model returned a function call, append it and the tool result, then call again
for item in response.output:
    if isinstance(item, ResponseOutputFunctionCall):
        input_messages.append({
            "type": "function_call",
            "id": item.id,
            "name": item.name,
            "arguments": item.arguments,
        })
        # Simulated tool result (in a real app you would call your function here)
        input_messages.append({
            "type": "function_call_output",
            "call_id": item.id,
            "output": '{"temperature": 22, "unit": "celsius", "conditions": "sunny"}',
        })
        response = client.responses.create(
            model=ResponsesModels.GPT_5_1_CODEX_MAX,
            input=input_messages,
            tools=[get_weather_tool],
            tool_choice="auto",
        )
        break

print("Assistant:", response.output_text.strip() or "(no text)")

src/gradient/resources/responses/responses.py

Lines changed: 4 additions & 2 deletions
@@ -58,7 +58,8 @@ def create(
         Create a response from the Responses API (POST /v1/responses).
 
         Args:
-            model: Model ID (e.g. openai-gpt-5.2-pro).
+            model: Model ID. Use ``ResponsesModels`` (e.g. ``ResponsesModels.GPT_5_2_PRO``)
+                for recommended model IDs.
             input: List of input items: user messages, function_call, function_call_output.
             tools: Optional list of tools the model may call.
             max_output_tokens: Maximum tokens to generate.
@@ -135,7 +136,8 @@ async def create(
         Create a response from the Responses API (POST /v1/responses).
 
         Args:
-            model: Model ID (e.g. openai-gpt-5.2-pro).
+            model: Model ID. Use ``ResponsesModels`` (e.g. ``ResponsesModels.GPT_5_2_PRO``)
+                for recommended model IDs.
             input: List of input items: user messages, function_call, function_call_output.
             tools: Optional list of tools the model may call.
             max_output_tokens: Maximum tokens to generate.
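To tie the documented parameters together, a call that passes a tool list and caps generation length might look like the sketch below. This is not part of the commit; `client`, `get_weather_tool`, and `ResponsesModels` are as defined in `examples/responses_tool_calling.py`, and the `max_output_tokens` value is arbitrary.

    response = client.responses.create(
        model=ResponsesModels.GPT_5_2_PRO,
        input=[{"type": "message", "role": "user", "content": "What's the weather in Paris?"}],
        tools=[get_weather_tool],  # tools the model may call
        tool_choice="auto",        # let the model decide whether to call a tool
        max_output_tokens=256,     # maximum tokens to generate
    )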
