You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
/// Ask the API to complete the request using the specified message and image(s). Any parameters will fall back to default values specified in <see cref="DefaultChatRequestArgs"/> if present, except for <see cref="ChatRequest.Model"/>, which will default to <see cref="Model.GPT4_Vision"/>.
131
+
/// </summary>
132
+
/// <param name="userMessage">The user message text to use in the generation.</param>
133
+
/// <param name="images">The images to use in the generation.</param>
134
+
/// <returns>The <see cref="ChatResult"/> with the API response.</returns>
/// <summary>
/// Constructor for a new Chat Message carrying text-only content.
/// </summary>
/// <param name="role">The role of the message, which can be "system", "assistant" or "user"</param>
/// <param name="text">The text to send in the message</param>
public ChatMessage(ChatMessageRole role, string text)
{
    // TextContent (rather than a raw content string) feeds the serialized
    // ContentItems list, so text and image parts share one representation.
    Role = role;
    TextContent = text;
}
34
+
35
+
/// <summary>
36
+
/// Constructor for a new Chat Message with text and one or more images
37
+
/// </summary>
38
+
/// <param name="role">The role of the message, which can be "system", "assistant" or "user"</param>
39
+
/// <param name="text">The text to send in the message. May be null if only sending image(s).</param>
40
+
/// <param name="imageInputs">Optionally add one or more images to the message if using a GPT Vision model. Consider using <see cref="ImageInput.FromFile(string, string)"/> to load an image from a local file, or <see cref="ImageInput.FromImageUrl(string, string)"/> to point to an image via URL. Please see <seealso href="https://platform.openai.com/docs/guides/vision"/> for more information and limitations.</param>
/// <summary>
/// The message content expressed as a list of text and/or image items, matching the
/// API's multi-part "content" wire format.
/// This is only used for serializing the request into JSON, do not use it directly.
/// </summary>
[JsonProperty("content")]
[JsonConverter(typeof(ContentDataConverter))]
internal IList<ContentItem> ContentItems
{
    get
    {
        // Text (if any) is emitted first, followed by each image, so the
        // serialized order is deterministic.
        List<ContentItem> items = new List<ContentItem>();
        if (!string.IsNullOrEmpty(TextContent))
        {
            items.Add(new ContentItem(TextContent));
        }
        if (Images != null && Images.Count > 0)
        {
            foreach (var image in Images)
            {
                items.Add(new ContentItem(image));
            }
        }

        return items;
    }
    set
    {
        // Deserialization path: fold the incoming content parts back into
        // TextContent / Images. Unknown part types are ignored.
        foreach (var item in value)
        {
            if (item.Type == "text")
            {
                TextContent = item.Text;
            }
            else if (item.Type == "image_url")
            {
                // The getter null-checks Images, so it may legitimately be null here;
                // initialize it before adding to avoid a NullReferenceException
                // when deserializing a message that contains images.
                if (Images == null)
                {
                    Images = new List<ImageInput>();
                }
                Images.Add(item.Image);
            }
        }
    }
}
56
119
57
120
/// <summary>
/// An optional name of the user in a multi-user chat.
/// Serialized as the "name" field of the message when present.
/// </summary>
[JsonProperty("name")]
public string Name { get; set; }
125
+
126
+
/// <summary>
127
+
/// Optionally add one or more images to the message if using a GPT Vision model. Please see <seealso href="https://platform.openai.com/docs/guides/vision"/> for more information and limitations.
/// <summary>
/// This is a helper class to serialize the content of the message to JSON.
/// An item holds either text or an image, never both; the <see cref="Type"/>
/// discriminator records which one is active.
/// </summary>
internal class ContentItem
{
    private string text;
    private ImageInput image;

    /// <summary>
    /// The type of content to send to the API. This can be "text" or "image_url".
    /// </summary>
    [JsonProperty("type")]
    public string Type { get; set; } = "text";

    /// <summary>
    /// Sends text to the API. This is the default type.
    /// Reads as null whenever this item currently holds an image instead.
    /// </summary>
    [JsonProperty("text")]
    public string Text
    {
        get => Type == "text" ? text : null;
        set
        {
            // Assigning text makes the item exclusively textual.
            text = value;
            image = null;
            Type = "text";
        }
    }

    /// <summary>
    /// Send an image to GPT Vision. Please see <seealso href="https://platform.openai.com/docs/guides/vision"/> for more information and limitations.
    /// Reads as null whenever this item currently holds text instead.
    /// </summary>
    [JsonProperty("image_url")]
    public ImageInput Image
    {
        get => Type == "image_url" ? image : null;
        set
        {
            // Assigning an image makes the item exclusively an image.
            image = value;
            text = null;
            Type = "image_url";
        }
    }

    /// <summary>
    /// Creates an empty <see cref="ContentItem"/>, defaulting to the "text" type.
    /// </summary>
    public ContentItem()
    {
    }

    /// <summary>
    /// Creates a new <see cref="ContentItem"/> with the given text.
    /// </summary>
    /// <param name="text">The text to send to the API</param>
    public ContentItem(string text)
    {
        // The Text setter also sets Type to "text".
        Text = text;
    }

    /// <summary>
    /// Creates a new <see cref="ContentItem"/> with the given image.
    /// </summary>
    /// <param name="image">The image to send to the API. Consider using <see cref="ImageInput.FromFile(string, string)"/> to load an image from a local file, or <see cref="ImageInput.FromImageUrl(string, string)"/> to point to an image via URL.</param>
    public ContentItem(ImageInput image)
    {
        // The Image setter also sets Type to "image_url".
        Image = image;
    }
}
218
+
219
+
/// <summary>
220
+
/// Represents an image to send to the API in a chat message as part of GPT Vision.
221
+
/// </summary>
222
+
publicclassImageInput
223
+
{
224
+
/// <summary>
/// Either a URL of the image or the base64 encoded image data
/// (as a "data:" URI), as accepted by the API's "image_url" content part.
/// </summary>
[JsonProperty("url")]
public string Url { get; set; }
229
+
230
+
/// <summary>
/// By controlling the detail parameter, which has three options, low, high, or auto, you have control over how the model processes the image and generates its textual understanding.
/// Defaults to "auto".
/// </summary>
[JsonProperty("detail")]
public string Detail { get; set; } = "auto";
235
+
236
+
/// <summary>
/// Instantiates a new ImageInput object pointing at the given url.
/// </summary>
/// <param name="url">A link to the image</param>
/// <param name="detail">By controlling the detail parameter, which has three options, low, high, or auto, you have control over how the model processes the image and generates its textual understanding</param>
public ImageInput(string url, string detail = "auto")
{
    Url = url;
    Detail = detail;
}
246
+
247
+
/// <summary>
248
+
/// Instantiates a new ImageInput object with the given image data bytes
249
+
/// </summary>
250
+
/// <param name="imageData">The image as bytes to be base64 encoded. OpenAI currently supports PNG (.png), JPEG (.jpeg and .jpg), WEBP (.webp), and non-animated GIF (.gif)</param>
251
+
/// <param name="detail">By controlling the detail parameter, which has three options, low, high, or auto, you have control over how the model processes the image and generates its textual understanding</param>
/// Instantiates a new ImageInput object with the given image loaded from disk
260
+
/// </summary>
261
+
/// <param name="filePath">The local file path of the image. OpenAI currently supports PNG (.png), JPEG (.jpeg and .jpg), WEBP (.webp), and non-animated GIF (.gif)</param>
262
+
/// <param name="detail">By controlling the detail parameter, which has three options, low, high, or auto, you have control over how the model processes the image and generates its textual understanding</param>
/// Instantiates a new ImageInput object with the given image data bytes
271
+
/// </summary>
272
+
/// <param name="imageData">The image as bytes to be base64 encoded</param>
273
+
/// <param name="detail">By controlling the detail parameter, which has three options, low, high, or auto, you have control over how the model processes the image and generates its textual understanding</param>
/// Instantiates a new ImageInput object with the given url
282
+
/// </summary>
283
+
/// <param name="url">A link to the image</param>
284
+
/// <param name="detail">By controlling the detail parameter, which has three options, low, high, or auto, you have control over how the model processes the image and generates its textual understanding</param>
/// By default, the model will use the auto setting which will look at the image input size and decide if it should use the low or high setting.
/// </summary>
public const string DetailAuto = "auto";

/// <summary>
/// low will disable the “high res” model. The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail.
/// </summary>
public const string DetailLow = "low";
299
+
/// <summary>
300
+
/// high will enable “high res” mode, which first allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens.
0 commit comments