Skip to content

Commit 71fff7a

Browse files
committed
Add code samples for the "images and vision" page
1 parent 9d7bef3 commit 71fff7a

File tree

4 files changed

+102
-1
lines changed

4 files changed

+102
-1
lines changed
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
// SAMPLE: Analyzes image by passing a base64-encoded image through Responses API
// PAGE: https://platform.openai.com/docs/guides/images-vision?api-mode=responses&format=base64-encoded#analyze-images
// GUIDANCE: Instructions to run this code: https://aka.ms/oai/net/start
#pragma warning disable OPENAI001

#:package OpenAI@2.*
#:property PublishAot=false

using OpenAI.Responses;

// The API key is read from the environment; the client targets the gpt-5 model.
string key = Environment.GetEnvironmentVariable("OPENAI_API_KEY")!;
OpenAIResponseClient client = new(model: "gpt-5", apiKey: key);

using var http = new HttpClient();

// Download an image as stream
using var stream = await http.GetStreamAsync("https://openai-documentation.vercel.app/images/cat_and_otter.png");

// Variant 1: wrap a raw image stream. BinaryData.FromStream buffers the
// stream contents; the SDK base64-encodes the bytes into the request.
OpenAIResponse response1 = (OpenAIResponse)client.CreateResponse([
    ResponseItem.CreateUserMessageItem([
        ResponseContentPart.CreateInputTextPart("What is in this image?"),
        ResponseContentPart.CreateInputImagePart(BinaryData.FromStream(stream), "image/png")
    ])
]);

Console.WriteLine($"From image stream: {response1.GetOutputText()}");

// Download an image as byte array
byte[] bytes = await http.GetByteArrayAsync("https://openai-documentation.vercel.app/images/cat_and_otter.png");

// Variant 2: wrap a raw byte array.
OpenAIResponse response2 = (OpenAIResponse)client.CreateResponse([
    ResponseItem.CreateUserMessageItem([
        ResponseContentPart.CreateInputTextPart("What is in this image?"),
        ResponseContentPart.CreateInputImagePart(BinaryData.FromBytes(bytes), "image/png")
    ])
]);

Console.WriteLine($"From byte array: {response2.GetOutputText()}");

// Convert the byte array to a base64 string
string base64 = Convert.ToBase64String(bytes);

// Variant 3: start from a base64 string. CreateInputImagePart expects the RAW
// image bytes (it base64-encodes them internally), so the base64 text must be
// decoded back to bytes first. Passing BinaryData.FromString(base64) would
// send the UTF-8 bytes of the base64 text — a double encoding the model
// cannot decode as an image.
OpenAIResponse response3 = (OpenAIResponse)client.CreateResponse([
    ResponseItem.CreateUserMessageItem([
        ResponseContentPart.CreateInputTextPart("What is in this image?"),
        ResponseContentPart.CreateInputImagePart(BinaryData.FromBytes(Convert.FromBase64String(base64)), "image/png")
    ])
]);

Console.WriteLine($"From base64 string: {response3.GetOutputText()}");
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
// SAMPLE: Analyzes file from a file upload through Responses API
// PAGE: https://platform.openai.com/docs/guides/images-vision?api-mode=responses&format=file#analyze-images
// GUIDANCE: Instructions to run this code: https://aka.ms/oai/net/start
#pragma warning disable OPENAI001

#:package OpenAI@2.*
#:property PublishAot=false

using OpenAI.Files;
using OpenAI.Responses;

// Both clients authenticate with the same API key from the environment.
string key = Environment.GetEnvironmentVariable("OPENAI_API_KEY")!;
OpenAIResponseClient client = new(model: "gpt-5", apiKey: key);
OpenAIFileClient fileClient = new(key);

using var httpClient = new HttpClient();

// Fetch the sample image as a stream...
using var imageStream = await httpClient.GetStreamAsync("https://openai-documentation.vercel.app/images/cat_and_otter.png");

// ...and upload it to the Files API with the "vision" purpose so the
// Responses API can reference it by file ID.
OpenAIFile uploadedFile = await fileClient.UploadFileAsync(BinaryData.FromStream(imageStream), "cat_and_otter.png", FileUploadPurpose.Vision);

// Ask the model about the uploaded image, referencing it by its file ID.
OpenAIResponse response = (OpenAIResponse)client.CreateResponse([
    ResponseItem.CreateUserMessageItem([
        ResponseContentPart.CreateInputFilePart(uploadedFile.Id),
        ResponseContentPart.CreateInputTextPart("what's in this image?")
    ])
]);

Console.WriteLine(response.GetOutputText());
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
// SAMPLE: Analyzes image by passing an image URL through Responses API
// PAGE: https://platform.openai.com/docs/guides/images-vision?api-mode=responses&format=url#analyze-images
// GUIDANCE: Instructions to run this code: https://aka.ms/oai/net/start
#pragma warning disable OPENAI001

#:package OpenAI@2.*
#:property PublishAot=false

using OpenAI.Responses;

// Read the API key from the environment and target the gpt-5 model.
string apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY")!;
OpenAIResponseClient client = new(model: "gpt-5", apiKey: apiKey);

// Build the two content parts of the user message: a text question and a
// publicly reachable image URL (the service fetches the image itself).
ResponseContentPart question = ResponseContentPart.CreateInputTextPart("What is in this image?");
ResponseContentPart image = ResponseContentPart.CreateInputImagePart(new Uri("https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"));

OpenAIResponse response = (OpenAIResponse)client.CreateResponse([
    ResponseItem.CreateUserMessageItem([question, image])
]);

Console.WriteLine(response.GetOutputText());

docs/guides/images-vision/responses/generate-edit-images.cs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
OpenAIResponse response = (OpenAIResponse)client.CreateResponse(
1717
BinaryContent.Create(BinaryData.FromObjectAsJson(new
1818
{
19-
model = "gpt-4.1",
19+
model = "gpt-5",
2020
input = "Generate an image of gray tabby cat hugging an otter with an orange scarf",
2121
tools = new[]
2222
{

0 commit comments

Comments
 (0)