
Commit cc89a7c

add responses-structured-outputs example

1 parent 29c7850

2 files changed: +353 -0 lines changed
Cargo.toml for the new example crate: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
[package]
name = "responses-structured-outputs"
version = "0.1.0"
edition = "2021"
publish = false

[dependencies]
async-openai = { path = "../../async-openai" }
serde_json = "1.0"
tokio = { version = "1", features = ["full"] }
clap = { version = "4", features = ["derive"] }
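
Once built, the example takes a single positional argument naming which demo to run. Clap exposes the value-enum variants defined below in kebab-case, so an invocation would look like cargo run -- chain-of-thought or cargo run -- all; the exact working directory is not shown in this diff and depends on where the crate sits in the repository.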
Main source file for the example binary: 342 additions & 0 deletions
@@ -0,0 +1,342 @@
use std::error::Error;

use async_openai::{
    config::OpenAIConfig,
    types::{
        chat::ResponseFormatJsonSchema,
        responses::{
            CreateResponseArgs, InputMessage, InputRole, OutputItem, OutputMessageContent,
        },
    },
    Client,
};
use clap::Parser;
use serde_json::json;

/// Chain of thought example: Guides the model through step-by-step reasoning
async fn chain_of_thought(client: &Client<OpenAIConfig>) -> Result<(), Box<dyn Error>> {
    println!("=== Chain of Thought Example ===\n");

    let schema = json!({
        "type": "object",
        "properties": {
            "steps": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "explanation": { "type": "string" },
                        "output": { "type": "string" }
                    },
                    "required": ["explanation", "output"],
                    "additionalProperties": false
                }
            },
            "final_answer": { "type": "string" }
        },
        "required": ["steps", "final_answer"],
        "additionalProperties": false
    });
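    // Illustrative only (not part of the committed code): with `strict: Some(true)`,
    // the model's reply is constrained to this schema, so for the prompt below it
    // could look roughly like
    // {"steps": [{"explanation": "Subtract 7 from both sides", "output": "8x = -30"},
    //            {"explanation": "Divide both sides by 8", "output": "x = -3.75"}],
    //  "final_answer": "x = -3.75"}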

    let request = CreateResponseArgs::default()
        .model("gpt-4o-2024-08-06")
        .max_output_tokens(512u32)
        .text(ResponseFormatJsonSchema {
            description: Some(
                "A step-by-step reasoning process for solving math problems".to_string(),
            ),
            name: "math_reasoning".to_string(),
            schema: Some(schema),
            strict: Some(true),
        })
        .input(vec![
            InputMessage {
                role: InputRole::System,
                content: vec![
                    "You are a helpful math tutor. Guide the user through the solution step by step."
                        .into(),
                ],
                status: None,
            },
            InputMessage {
                role: InputRole::User,
                content: vec!["How can I solve 8x + 7 = -23?".into()],
                status: None,
            },
        ])
        .build()?;

    let response = client.responses().create(request).await?;

    for output in response.output {
        if let OutputItem::Message(message) = output {
            for content in message.content {
                if let OutputMessageContent::OutputText(text) = content {
                    println!("Response:\n{}\n", text.text);
                }
            }
        }
    }

    Ok(())
}

/// Structured data extraction example: Extracts specific fields from unstructured text
async fn structured_data_extraction(client: &Client<OpenAIConfig>) -> Result<(), Box<dyn Error>> {
    println!("=== Structured Data Extraction Example ===\n");

    let schema = json!({
        "type": "object",
        "properties": {
            "name": { "type": "string" },
            "age": { "type": "integer" },
            "occupation": { "type": "string" },
            "location": { "type": "string" },
            "email": { "type": "string" }
        },
        "required": ["name", "age", "occupation", "email", "location"],
        "additionalProperties": false
    });

    let text = "Hi, I'm Sarah Johnson. I'm 28 years old and I work as a software engineer in San Francisco. You can reach me at [email protected].";

    let request = CreateResponseArgs::default()
        .model("gpt-4o-2024-08-06")
        .max_output_tokens(256u32)
        .text(ResponseFormatJsonSchema {
            description: Some("Extract structured information from text".to_string()),
            name: "person_info".to_string(),
            schema: Some(schema),
            strict: Some(true),
        })
        .input(vec![
            InputMessage {
                role: InputRole::System,
                content: vec!["Extract the following information from the user's text: name, age, occupation, location, and email. If any information is not present, omit that field.".into()],
                status: None,
            },
            InputMessage {
                role: InputRole::User,
                content: vec![text.into()],
                status: None,
            },
        ])
        .build()?;

    let response = client.responses().create(request).await?;

    println!("Input text: {}\n", text);
    for output in response.output {
        if let OutputItem::Message(message) = output {
            for content in message.content {
                if let OutputMessageContent::OutputText(text) = content {
                    println!("Extracted data:\n{}\n", text.text);
                }
            }
        }
    }

    Ok(())
}

/// UI generation example: Generates UI component code based on description
async fn ui_generation(client: &Client<OpenAIConfig>) -> Result<(), Box<dyn Error>> {
    println!("=== UI Generation Example ===\n");

    let schema = json!({
        "type": "object",
        "properties": {
            "type": {
                "type": "string",
                "description": "The type of the UI component",
                "enum": ["div", "button", "header", "section", "field", "form"]
            },
            "label": {
                "type": "string",
                "description": "The label of the UI component, used for buttons or form fields"
            },
            "children": {
                "type": "array",
                "description": "Nested UI components",
                "items": {"$ref": "#"}
            },
            "attributes": {
                "type": "array",
                "description": "Arbitrary attributes for the UI component, suitable for any element",
                "items": {
                    "type": "object",
                    "properties": {
                        "name": {
                            "type": "string",
                            "description": "The name of the attribute, for example onClick or className"
                        },
                        "value": {
                            "type": "string",
                            "description": "The value of the attribute"
                        }
                    },
                    "required": ["name", "value"],
                    "additionalProperties": false
                }
            }
        },
        "required": ["type", "label", "children", "attributes"],
        "additionalProperties": false
    });

    let request = CreateResponseArgs::default()
        .model("gpt-4o-2024-08-06")
        .max_output_tokens(1024u32)
        .text(ResponseFormatJsonSchema {
            description: Some("Generate HTML and CSS code for UI components".to_string()),
            name: "ui_component".to_string(),
            schema: Some(schema),
            strict: Some(true),
        })
        .input(vec![
            InputMessage {
                role: InputRole::System,
                content: vec!["You are a UI designer. Generate clean, modern HTML and CSS code for the requested UI component. The HTML should be semantic and accessible, and the CSS should be well-organized.".into()],
                status: None,
            },
            InputMessage {
                role: InputRole::User,
                content: vec!["Create a login form with email and password fields, a submit button, and a link to sign up. Use a modern, clean design with a light color scheme.".into()],
                status: None,
            },
        ])
        .build()?;

    let response = client.responses().create(request).await?;

    for output in response.output {
        if let OutputItem::Message(message) = output {
            for content in message.content {
                if let OutputMessageContent::OutputText(text) = content {
                    println!("Generated UI Component:\n{}\n", text.text);
                }
            }
        }
    }

    Ok(())
}

/// Moderation example: Analyzes content for policy violations with structured output
async fn moderation(client: &Client<OpenAIConfig>) -> Result<(), Box<dyn Error>> {
    println!("=== Moderation Example ===\n");

    let schema = json!({
        "type": "object",
        "properties": {
            "is_violating": {
                "type": "boolean",
                "description": "Indicates if the content is violating guidelines"
            },
            "category": {
                "type": ["string", "null"],
                "description": "Type of violation, if the content is violating guidelines. Null otherwise.",
                "enum": ["violence", "sexual", "self_harm"]
            },
            "explanation_if_violating": {
                "type": ["string", "null"],
                "description": "Explanation of why the content is violating"
            }
        },
        "required": ["is_violating", "category", "explanation_if_violating"],
        "additionalProperties": false
    });

    let content_to_moderate = "How do I prepare for a job interview?";

    let request = CreateResponseArgs::default()
        .model("gpt-4o-2024-08-06")
        .max_output_tokens(256u32)
        .text(ResponseFormatJsonSchema {
            description: Some("Analyze content for policy violations and provide structured moderation results".to_string()),
            name: "moderation_result".to_string(),
            schema: Some(schema),
            strict: Some(true),
        })
        .input(vec![
            InputMessage {
                role: InputRole::System,
                content: vec!["Determine if the user input violates specific guidelines and explain if they do.".into()],
                status: None,
            },
            InputMessage {
                role: InputRole::User,
                content: vec![content_to_moderate.into()],
                status: None,
            },
        ])
        .build()?;

    let response = client.responses().create(request).await?;

    for output in response.output {
        if let OutputItem::Message(message) = output {
            for content in message.content {
                if let OutputMessageContent::OutputText(text) = content {
                    println!("Content analyzed: {}\n", content_to_moderate);
                    println!("Moderation result:\n{}\n", text.text);
                }
            }
        }
    }

    Ok(())
}

#[derive(Parser, Debug)]
#[command(name = "responses-structured-outputs")]
#[command(about = "Examples of structured outputs using the Responses API", long_about = None)]
struct Cli {
    /// Which example to run
    #[arg(value_enum)]
    example: Example,
}

#[derive(clap::ValueEnum, Clone, Debug)]
enum Example {
    /// Chain of thought: Step-by-step reasoning for math problems
    ChainOfThought,
    /// Structured data extraction: Extract fields from unstructured text
    DataExtraction,
    /// UI generation: Generate HTML and CSS for UI components
    UiGeneration,
    /// Moderation: Analyze content for policy violations
    Moderation,
    /// Run all examples
    All,
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let cli = Cli::parse();
    let client = Client::new();

    match cli.example {
        Example::ChainOfThought => {
            chain_of_thought(&client).await?;
        }
        Example::DataExtraction => {
            structured_data_extraction(&client).await?;
        }
        Example::UiGeneration => {
            ui_generation(&client).await?;
        }
        Example::Moderation => {
            moderation(&client).await?;
        }
        Example::All => {
            chain_of_thought(&client).await?;
            structured_data_extraction(&client).await?;
            ui_generation(&client).await?;
            moderation(&client).await?;
        }
    }

    Ok(())
}
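
The examples above only print the JSON text returned by the model. As a minimal follow-on sketch (not part of this commit), that text can be parsed with the serde_json dependency already declared in Cargo.toml; the hypothetical helper below pulls out the final_answer field required by the math_reasoning schema.

use serde_json::Value;

// Hypothetical helper, shown for illustration only: parse the structured output
// text and print the `final_answer` field defined by the chain-of-thought schema.
fn print_final_answer(json_text: &str) -> Result<(), serde_json::Error> {
    let parsed: Value = serde_json::from_str(json_text)?;
    if let Some(answer) = parsed.get("final_answer").and_then(Value::as_str) {
        println!("Final answer: {}", answer);
    }
    Ok(())
}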
