Skip to content

Commit 0d6f3fc

Browse files
authored
Add support for new Response properties (#845)
Add support for the latest Response properties
1 parent 04bc7d3 commit 0d6f3fc

25 files changed

+10600
-9373
lines changed

api/OpenAI.net8.0.cs

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1723,6 +1723,8 @@ public class ChatCompletionOptions : IJsonModel<ChatCompletionOptions>, IPersist
17231723
[Experimental("OPENAI001")]
17241724
public ChatResponseModalities ResponseModalities { get; set; }
17251725
[Experimental("OPENAI001")]
1726+
public string SafetyIdentifier { get; set; }
1727+
[Experimental("OPENAI001")]
17261728
public long? Seed { get; set; }
17271729
[Experimental("OPENAI001")]
17281730
public ChatServiceTier? ServiceTier { get; set; }
@@ -5591,13 +5593,15 @@ public enum MessageStatus {
55915593
[Experimental("OPENAI001")]
55925594
public class OpenAIResponse : IJsonModel<OpenAIResponse>, IPersistableModel<OpenAIResponse> {
55935595
public bool? BackgroundModeEnabled { get; }
5596+
public string ConversationId { get; }
55945597
public DateTimeOffset CreatedAt { get; }
55955598
public string EndUserId { get; }
55965599
public ResponseError Error { get; }
55975600
public string Id { get; }
55985601
public ResponseIncompleteStatusDetails IncompleteStatusDetails { get; }
55995602
public string Instructions { get; }
56005603
public int? MaxOutputTokenCount { get; }
5604+
public int? MaxToolCallCount { get; }
56015605
public IDictionary<string, string> Metadata { get; }
56025606
public string Model { get; }
56035607
public IList<ResponseItem> OutputItems { get; }
@@ -5608,12 +5612,14 @@ public class OpenAIResponse : IJsonModel<OpenAIResponse>, IPersistableModel<Open
56085612
public ref JsonPatch Patch { get; }
56095613
public string PreviousResponseId { get; }
56105614
public ResponseReasoningOptions ReasoningOptions { get; }
5615+
public string SafetyIdentifier { get; }
56115616
public ResponseServiceTier? ServiceTier { get; }
56125617
public ResponseStatus? Status { get; }
56135618
public float? Temperature { get; }
56145619
public ResponseTextOptions TextOptions { get; }
56155620
public ResponseToolChoice ToolChoice { get; }
56165621
public IList<ResponseTool> Tools { get; }
5622+
public int? TopLogProbabilityCount { get; }
56175623
public float? TopP { get; }
56185624
public ResponseTruncationMode? TruncationMode { get; }
56195625
public ResponseTokenUsage Usage { get; }
@@ -5670,7 +5676,7 @@ public class OpenAIResponseClient {
56705676
[Experimental("OPENAI001")]
56715677
public static class OpenAIResponsesModelFactory {
56725678
public static MessageResponseItem MessageResponseItem(string id = null, MessageRole role = MessageRole.Assistant, MessageStatus? status = null);
5673-
public static OpenAIResponse OpenAIResponse(string id = null, DateTimeOffset createdAt = default, ResponseStatus? status = null, ResponseError error = null, ResponseTokenUsage usage = null, string endUserId = null, ResponseReasoningOptions reasoningOptions = null, int? maxOutputTokenCount = null, ResponseTextOptions textOptions = null, ResponseTruncationMode? truncationMode = null, ResponseIncompleteStatusDetails incompleteStatusDetails = null, IEnumerable<ResponseItem> outputItems = null, bool parallelToolCallsEnabled = false, ResponseToolChoice toolChoice = null, string model = null, IDictionary<string, string> metadata = null, float? temperature = null, float? topP = null, ResponseServiceTier? serviceTier = null, string previousResponseId = null, bool? backgroundModeEnabled = null, string instructions = null, IEnumerable<ResponseTool> tools = null);
5679+
public static OpenAIResponse OpenAIResponse(string id = null, DateTimeOffset createdAt = default, ResponseStatus? status = null, ResponseError error = null, ResponseTokenUsage usage = null, string endUserId = null, string safetyIdentifier = null, ResponseReasoningOptions reasoningOptions = null, int? maxOutputTokenCount = null, int? maxToolCallCount = null, ResponseTextOptions textOptions = null, ResponseTruncationMode? truncationMode = null, ResponseIncompleteStatusDetails incompleteStatusDetails = null, IEnumerable<ResponseItem> outputItems = null, bool parallelToolCallsEnabled = false, ResponseToolChoice toolChoice = null, string model = null, IDictionary<string, string> metadata = null, float? temperature = null, int? topLogProbabilityCount = null, float? topP = null, ResponseServiceTier? serviceTier = null, string previousResponseId = null, bool? backgroundModeEnabled = null, string instructions = null, IEnumerable<ResponseTool> tools = null, string conversationId = null);
56745680
public static ReasoningResponseItem ReasoningResponseItem(string id = null, string encryptedContent = null, ReasoningStatus? status = null, IEnumerable<ReasoningSummaryPart> summaryParts = null);
56755681
public static ReasoningResponseItem ReasoningResponseItem(string id = null, string encryptedContent = null, ReasoningStatus? status = null, string summaryText = null);
56765682
public static ReferenceResponseItem ReferenceResponseItem(string id = null);
@@ -5764,10 +5770,12 @@ public enum ResponseContentPartKind {
57645770
[Experimental("OPENAI001")]
57655771
public class ResponseCreationOptions : IJsonModel<ResponseCreationOptions>, IPersistableModel<ResponseCreationOptions> {
57665772
public bool? BackgroundModeEnabled { get; set; }
5773+
public string ConversationId { get; set; }
57675774
public string EndUserId { get; set; }
57685775
public IList<IncludedResponseProperty> IncludedProperties { get; }
57695776
public string Instructions { get; set; }
57705777
public int? MaxOutputTokenCount { get; set; }
5778+
public int? MaxToolCallCount { get; set; }
57715779
public IDictionary<string, string> Metadata { get; }
57725780
public bool? ParallelToolCallsEnabled { get; set; }
57735781
[Serialization.JsonIgnore]
@@ -5776,12 +5784,14 @@ public class ResponseCreationOptions : IJsonModel<ResponseCreationOptions>, IPer
57765784
public ref JsonPatch Patch { get; }
57775785
public string PreviousResponseId { get; set; }
57785786
public ResponseReasoningOptions ReasoningOptions { get; set; }
5787+
public string SafetyIdentifier { get; set; }
57795788
public ResponseServiceTier? ServiceTier { get; set; }
57805789
public bool? StoredOutputEnabled { get; set; }
57815790
public float? Temperature { get; set; }
57825791
public ResponseTextOptions TextOptions { get; set; }
57835792
public ResponseToolChoice ToolChoice { get; set; }
57845793
public IList<ResponseTool> Tools { get; }
5794+
public int? TopLogProbabilityCount { get; set; }
57855795
public float? TopP { get; set; }
57865796
public ResponseTruncationMode? TruncationMode { get; set; }
57875797
protected virtual ResponseCreationOptions JsonModelCreateCore(ref Utf8JsonReader reader, ModelReaderWriterOptions options);

api/OpenAI.netstandard2.0.cs

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1521,6 +1521,7 @@ public class ChatCompletionOptions : IJsonModel<ChatCompletionOptions>, IPersist
15211521
public ChatReasoningEffortLevel? ReasoningEffortLevel { get; set; }
15221522
public ChatResponseFormat ResponseFormat { get; set; }
15231523
public ChatResponseModalities ResponseModalities { get; set; }
1524+
public string SafetyIdentifier { get; set; }
15241525
public long? Seed { get; set; }
15251526
public ChatServiceTier? ServiceTier { get; set; }
15261527
public IList<string> StopSequences { get; }
@@ -4907,13 +4908,15 @@ public enum MessageStatus {
49074908
}
49084909
public class OpenAIResponse : IJsonModel<OpenAIResponse>, IPersistableModel<OpenAIResponse> {
49094910
public bool? BackgroundModeEnabled { get; }
4911+
public string ConversationId { get; }
49104912
public DateTimeOffset CreatedAt { get; }
49114913
public string EndUserId { get; }
49124914
public ResponseError Error { get; }
49134915
public string Id { get; }
49144916
public ResponseIncompleteStatusDetails IncompleteStatusDetails { get; }
49154917
public string Instructions { get; }
49164918
public int? MaxOutputTokenCount { get; }
4919+
public int? MaxToolCallCount { get; }
49174920
public IDictionary<string, string> Metadata { get; }
49184921
public string Model { get; }
49194922
public IList<ResponseItem> OutputItems { get; }
@@ -4923,12 +4926,14 @@ public class OpenAIResponse : IJsonModel<OpenAIResponse>, IPersistableModel<Open
49234926
public ref JsonPatch Patch { get; }
49244927
public string PreviousResponseId { get; }
49254928
public ResponseReasoningOptions ReasoningOptions { get; }
4929+
public string SafetyIdentifier { get; }
49264930
public ResponseServiceTier? ServiceTier { get; }
49274931
public ResponseStatus? Status { get; }
49284932
public float? Temperature { get; }
49294933
public ResponseTextOptions TextOptions { get; }
49304934
public ResponseToolChoice ToolChoice { get; }
49314935
public IList<ResponseTool> Tools { get; }
4936+
public int? TopLogProbabilityCount { get; }
49324937
public float? TopP { get; }
49334938
public ResponseTruncationMode? TruncationMode { get; }
49344939
public ResponseTokenUsage Usage { get; }
@@ -4981,7 +4986,7 @@ public class OpenAIResponseClient {
49814986
}
49824987
public static class OpenAIResponsesModelFactory {
49834988
public static MessageResponseItem MessageResponseItem(string id = null, MessageRole role = MessageRole.Assistant, MessageStatus? status = null);
4984-
public static OpenAIResponse OpenAIResponse(string id = null, DateTimeOffset createdAt = default, ResponseStatus? status = null, ResponseError error = null, ResponseTokenUsage usage = null, string endUserId = null, ResponseReasoningOptions reasoningOptions = null, int? maxOutputTokenCount = null, ResponseTextOptions textOptions = null, ResponseTruncationMode? truncationMode = null, ResponseIncompleteStatusDetails incompleteStatusDetails = null, IEnumerable<ResponseItem> outputItems = null, bool parallelToolCallsEnabled = false, ResponseToolChoice toolChoice = null, string model = null, IDictionary<string, string> metadata = null, float? temperature = null, float? topP = null, ResponseServiceTier? serviceTier = null, string previousResponseId = null, bool? backgroundModeEnabled = null, string instructions = null, IEnumerable<ResponseTool> tools = null);
4989+
public static OpenAIResponse OpenAIResponse(string id = null, DateTimeOffset createdAt = default, ResponseStatus? status = null, ResponseError error = null, ResponseTokenUsage usage = null, string endUserId = null, string safetyIdentifier = null, ResponseReasoningOptions reasoningOptions = null, int? maxOutputTokenCount = null, int? maxToolCallCount = null, ResponseTextOptions textOptions = null, ResponseTruncationMode? truncationMode = null, ResponseIncompleteStatusDetails incompleteStatusDetails = null, IEnumerable<ResponseItem> outputItems = null, bool parallelToolCallsEnabled = false, ResponseToolChoice toolChoice = null, string model = null, IDictionary<string, string> metadata = null, float? temperature = null, int? topLogProbabilityCount = null, float? topP = null, ResponseServiceTier? serviceTier = null, string previousResponseId = null, bool? backgroundModeEnabled = null, string instructions = null, IEnumerable<ResponseTool> tools = null, string conversationId = null);
49854990
public static ReasoningResponseItem ReasoningResponseItem(string id = null, string encryptedContent = null, ReasoningStatus? status = null, IEnumerable<ReasoningSummaryPart> summaryParts = null);
49864991
public static ReasoningResponseItem ReasoningResponseItem(string id = null, string encryptedContent = null, ReasoningStatus? status = null, string summaryText = null);
49874992
public static ReferenceResponseItem ReferenceResponseItem(string id = null);
@@ -5065,23 +5070,27 @@ public enum ResponseContentPartKind {
50655070
}
50665071
public class ResponseCreationOptions : IJsonModel<ResponseCreationOptions>, IPersistableModel<ResponseCreationOptions> {
50675072
public bool? BackgroundModeEnabled { get; set; }
5073+
public string ConversationId { get; set; }
50685074
public string EndUserId { get; set; }
50695075
public IList<IncludedResponseProperty> IncludedProperties { get; }
50705076
public string Instructions { get; set; }
50715077
public int? MaxOutputTokenCount { get; set; }
5078+
public int? MaxToolCallCount { get; set; }
50725079
public IDictionary<string, string> Metadata { get; }
50735080
public bool? ParallelToolCallsEnabled { get; set; }
50745081
[Serialization.JsonIgnore]
50755082
[EditorBrowsable(EditorBrowsableState.Never)]
50765083
public ref JsonPatch Patch { get; }
50775084
public string PreviousResponseId { get; set; }
50785085
public ResponseReasoningOptions ReasoningOptions { get; set; }
5086+
public string SafetyIdentifier { get; set; }
50795087
public ResponseServiceTier? ServiceTier { get; set; }
50805088
public bool? StoredOutputEnabled { get; set; }
50815089
public float? Temperature { get; set; }
50825090
public ResponseTextOptions TextOptions { get; set; }
50835091
public ResponseToolChoice ToolChoice { get; set; }
50845092
public IList<ResponseTool> Tools { get; }
5093+
public int? TopLogProbabilityCount { get; set; }
50855094
public float? TopP { get; set; }
50865095
public ResponseTruncationMode? TruncationMode { get; set; }
50875096
protected virtual ResponseCreationOptions JsonModelCreateCore(ref Utf8JsonReader reader, ModelReaderWriterOptions options);

specification/base/typespec/chat/models.tsp

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -192,15 +192,6 @@ model CreateChatCompletionRequest {
192192
search_context_size?: WebSearchContextSize = "medium";
193193
};
194194

195-
@doc("""
196-
An integer between 0 and 20 specifying the number of most likely tokens to
197-
return at each token position, each with an associated log probability.
198-
`logprobs` must be set to `true` if this parameter is used.
199-
""")
200-
@minValue(0)
201-
@maxValue(20)
202-
top_logprobs?: int32 | null;
203-
204195
// Tool customization: apply a named union type
205196
@doc("""
206197
An object specifying the format that the model must output.

specification/base/typespec/common/models.tsp

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -249,6 +249,9 @@ model ModelResponsePropertiesForRequest {
249249
@minValue(0)
250250
@maxValue(2)
251251
temperature?: float32 | null = 1;
252+
253+
/** An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. */
254+
top_logprobs?: int32 | null;
252255

253256
@doc("""
254257
An alternative to sampling with temperature, called nucleus sampling,
@@ -265,6 +268,10 @@ model ModelResponsePropertiesForRequest {
265268
/** A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). */
266269
user?: string;
267270

271+
/** A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.
272+
Each ID should be a string that uniquely identifies a user. We recommend hashing their username or email address, in order to avoid sending us any identifying information. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). */
273+
safety_identifier?: string;
274+
268275
service_tier?: ServiceTier;
269276
}
270277
model ModelResponsePropertiesForResponse {
@@ -278,6 +285,9 @@ model ModelResponsePropertiesForResponse {
278285
@maxValue(2)
279286
temperature: float32 | null;
280287

288+
/** An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. */
289+
top_logprobs?: int32 | null;
290+
281291
@doc("""
282292
An alternative to sampling with temperature, called nucleus sampling,
283293
where the model considers the results of the tokens with top_p probability
@@ -293,6 +303,10 @@ model ModelResponsePropertiesForResponse {
293303
/** A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). */
294304
user: string | null;
295305

306+
/** A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.
307+
Each ID should be a string that uniquely identifies a user. We recommend hashing their username or email address, in order to avoid sending us any identifying information. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). */
308+
safety_identifier?: string;
309+
296310
service_tier?: ServiceTier;
297311
}
298312

specification/base/typespec/responses/models.tsp

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,25 @@ model CreateResponse {
8787
* for more information.
8888
*/
8989
stream?: boolean | null = false;
90+
91+
/** The conversation that this response belongs to.
92+
* Items from this conversation are prepended to input_items for this response request.
93+
* Input items and output items from this response are automatically added to this conversation after this response completes. */
94+
conversation?: ConversationParam | null;
95+
}
96+
97+
/** The conversation that this response belongs to. Items from this conversation are prepended to `input_items` for this response request.
98+
Input items and output items from this response are automatically added to this conversation after this response completes. */
99+
union ConversationParam {
100+
string,
101+
`ConversationParam-2`,
102+
}
103+
104+
/** The conversation that this response belongs to. */
105+
@summary("Conversation object")
106+
model `ConversationParam-2` {
107+
/** The unique ID of the conversation. */
108+
id: string;
90109
}
91110

92111
model Response {
@@ -149,6 +168,9 @@ model Response {
149168

150169
/** Whether to allow the model to run tool calls in parallel. */
151170
parallel_tool_calls: boolean = true;
171+
172+
/** The conversation that this response belongs to. Input items and output items from this response are automatically added to this conversation. */
173+
conversation?: `ConversationParam-2` | null;
152174
}
153175

154176
model ResponseProperties {
@@ -178,6 +200,9 @@ model ResponseProperties {
178200
/** An upper bound for the number of tokens that can be generated for a response, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). */
179201
max_output_tokens?: int32 | null;
180202

203+
/** The maximum number of total calls to built-in tools that can be processed in a response. This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored. */
204+
max_tool_calls?: int32 | null;
205+
181206
@doc("""
182207
Inserts a system (or developer) message as the first item in the model's context.
183208

specification/client/responses.client.tsp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,8 @@ using TypeSpec.HttpClient.CSharp;
88

99
@@alternateType(CreateResponse.service_tier, DotNetResponseServiceTier);
1010
@@alternateType(Response.service_tier, DotNetResponseServiceTier);
11+
@@alternateType(CreateResponse.conversation, string);
12+
@@clientName(CreateResponse.conversation, "ConversationId");
1113

1214
// ------------ ItemResources ------------
1315
@@usage(ItemResource, Usage.input | Usage.output);

src/Custom/Responses/Internal/GeneratorStubs.cs

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -121,4 +121,5 @@ namespace OpenAI.Responses;
121121
[CodeGenType("DotNetCustomToolCallApprovalPolicyAlways")] internal partial class InternalDotNetCustomToolCallApprovalPolicyAlways { }
122122
[CodeGenType("DotNetCustomToolCallApprovalPolicyNever")] internal partial class InternalDotNetCustomToolCallApprovalPolicyNever { }
123123
[CodeGenType("UnknownCodeInterpreterToolOutput")] internal partial class InternalUnknownCodeInterpreterToolOutput {}
124-
[CodeGenType("UnknownCodeInterpreterContainerConfiguration")] internal partial class InternalUnknownCodeInterpreterContainerConfiguration {}
124+
[CodeGenType("UnknownCodeInterpreterContainerConfiguration")] internal partial class InternalUnknownCodeInterpreterContainerConfiguration {}
125+
[CodeGenType("ConversationParam2")] internal partial class InternalConversation {}

src/Custom/Responses/OpenAIResponse.cs

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,10 @@ public partial class OpenAIResponse
3030
[CodeGenMember("MaxOutputTokens")]
3131
public int? MaxOutputTokenCount { get; }
3232

33+
// CUSTOM: Renamed.
34+
[CodeGenMember("MaxToolCalls")]
35+
public int? MaxToolCallCount { get; }
36+
3337
// CUSTOM: Renamed.
3438
[CodeGenMember("Text")]
3539
public ResponseTextOptions TextOptions { get; }
@@ -58,6 +62,12 @@ public partial class OpenAIResponse
5862
[CodeGenMember("Model")]
5963
public string Model { get; }
6064

65+
public string ConversationId => Conversation.Id;
66+
67+
// CUSTOM: Renamed.
68+
[CodeGenMember("TopLogprobs")]
69+
public int? TopLogProbabilityCount { get; }
70+
6171
// CUSTOM: Made internal
6272
internal string Object { get; } = "response";
6373

0 commit comments

Comments
 (0)