@@ -80,8 +80,13 @@ class ChatOpenAIOptions extends ChatModelOptions {
8080 /// {@macro chat_openai_options}
8181 const ChatOpenAIOptions ({
8282 super .model,
83+ this .store,
84+ this .reasoningEffort,
85+ this .metadata,
8386 this .frequencyPenalty,
8487 this .logitBias,
88+ this .logprobs,
89+ this .topLogprobs,
8590 this .maxTokens,
8691 this .n,
8792 this .presencePenalty,
@@ -95,6 +100,7 @@ class ChatOpenAIOptions extends ChatModelOptions {
95100 this .parallelToolCalls,
96101 this .serviceTier,
97102 this .user,
103+ this .verbosity,
98104 super .concurrencyLimit,
99105 });
100106
@@ -110,6 +116,33 @@ class ChatOpenAIOptions extends ChatModelOptions {
110116 /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-logit_bias
111117 final Map <String , int >? logitBias;
112118
119+ /// Whether or not to store the output of this chat completion request.
120+ ///
121+ /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-store
122+ final bool ? store;
123+
124+ /// Constrains effort on reasoning for reasoning models.
125+ /// Supported values are `minimal` , `low` , `medium` , and `high` .
126+ ///
127+ /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort
128+ final ChatOpenAIReasoningEffort ? reasoningEffort;
129+
130+ /// Developer-defined tags and values used for filtering completions.
131+ ///
132+ /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-metadata
133+ final Map <String , String >? metadata;
134+
135+ /// Whether to return log probabilities of the output tokens.
136+ ///
137+ /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-logprobs
138+ final bool ? logprobs;
139+
140+ /// Number of most likely tokens to return at each token position when
141+ /// [logprobs] is set to true.
142+ ///
143+ /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_logprobs
144+ final int ? topLogprobs;
145+
113146 /// The maximum number of tokens to generate in the chat completion.
114147 /// Defaults to inf.
115148 ///
@@ -188,11 +221,23 @@ class ChatOpenAIOptions extends ChatModelOptions {
188221 /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
189222 final String ? user;
190223
224+ /// Constrains the verbosity of the model's response. Lower values will result
225+ /// in more concise responses, while higher values will be more verbose.
226+ /// Supported values are `low` , `medium` , and `high` .
227+ ///
228+ /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-verbosity
229+ final ChatOpenAIVerbosity ? verbosity;
230+
191231 @override
192232 ChatOpenAIOptions copyWith ({
193233 final String ? model,
234+ final bool ? store,
235+ final ChatOpenAIReasoningEffort ? reasoningEffort,
236+ final Map <String , String >? metadata,
194237 final double ? frequencyPenalty,
195238 final Map <String , int >? logitBias,
239+ final bool ? logprobs,
240+ final int ? topLogprobs,
196241 final int ? maxTokens,
197242 final int ? n,
198243 final double ? presencePenalty,
@@ -206,12 +251,18 @@ class ChatOpenAIOptions extends ChatModelOptions {
206251 final bool ? parallelToolCalls,
207252 final ChatOpenAIServiceTier ? serviceTier,
208253 final String ? user,
254+ final ChatOpenAIVerbosity ? verbosity,
209255 final int ? concurrencyLimit,
210256 }) {
211257 return ChatOpenAIOptions (
212258 model: model ?? this .model,
259+ store: store ?? this .store,
260+ reasoningEffort: reasoningEffort ?? this .reasoningEffort,
261+ metadata: metadata ?? this .metadata,
213262 frequencyPenalty: frequencyPenalty ?? this .frequencyPenalty,
214263 logitBias: logitBias ?? this .logitBias,
264+ logprobs: logprobs ?? this .logprobs,
265+ topLogprobs: topLogprobs ?? this .topLogprobs,
215266 maxTokens: maxTokens ?? this .maxTokens,
216267 n: n ?? this .n,
217268 presencePenalty: presencePenalty ?? this .presencePenalty,
@@ -225,6 +276,7 @@ class ChatOpenAIOptions extends ChatModelOptions {
225276 parallelToolCalls: parallelToolCalls ?? this .parallelToolCalls,
226277 serviceTier: serviceTier ?? this .serviceTier,
227278 user: user ?? this .user,
279+ verbosity: verbosity ?? this .verbosity,
228280 concurrencyLimit: concurrencyLimit ?? this .concurrencyLimit,
229281 );
230282 }
@@ -233,8 +285,13 @@ class ChatOpenAIOptions extends ChatModelOptions {
233285 ChatOpenAIOptions merge (covariant final ChatOpenAIOptions ? other) {
234286 return copyWith (
235287 model: other? .model,
288+ store: other? .store,
289+ reasoningEffort: other? .reasoningEffort,
290+ metadata: other? .metadata,
236291 frequencyPenalty: other? .frequencyPenalty,
237292 logitBias: other? .logitBias,
293+ logprobs: other? .logprobs,
294+ topLogprobs: other? .topLogprobs,
238295 maxTokens: other? .maxTokens,
239296 n: other? .n,
240297 presencePenalty: other? .presencePenalty,
@@ -248,6 +305,7 @@ class ChatOpenAIOptions extends ChatModelOptions {
248305 parallelToolCalls: other? .parallelToolCalls,
249306 serviceTier: other? .serviceTier,
250307 user: other? .user,
308+ verbosity: other? .verbosity,
251309 concurrencyLimit: other? .concurrencyLimit,
252310 );
253311 }
@@ -257,9 +315,15 @@ class ChatOpenAIOptions extends ChatModelOptions {
257315 return identical (this , other) ||
258316 runtimeType == other.runtimeType &&
259317 model == other.model &&
318+ store == other.store &&
319+ reasoningEffort == other.reasoningEffort &&
320+ const MapEquality <String , String >()
321+ .equals (metadata, other.metadata) &&
260322 frequencyPenalty == other.frequencyPenalty &&
261323 const MapEquality <String , int >()
262324 .equals (logitBias, other.logitBias) &&
325+ logprobs == other.logprobs &&
326+ topLogprobs == other.topLogprobs &&
263327 maxTokens == other.maxTokens &&
264328 n == other.n &&
265329 presencePenalty == other.presencePenalty &&
@@ -273,14 +337,20 @@ class ChatOpenAIOptions extends ChatModelOptions {
273337 parallelToolCalls == other.parallelToolCalls &&
274338 serviceTier == other.serviceTier &&
275339 user == other.user &&
340+ verbosity == other.verbosity &&
276341 concurrencyLimit == other.concurrencyLimit;
277342 }
278343
279344 @override
280345 int get hashCode {
281346 return model.hashCode ^
347+ store.hashCode ^
348+ reasoningEffort.hashCode ^
349+ const MapEquality <String , String >().hash (metadata) ^
282350 frequencyPenalty.hashCode ^
283351 const MapEquality <String , int >().hash (logitBias) ^
352+ logprobs.hashCode ^
353+ topLogprobs.hashCode ^
284354 maxTokens.hashCode ^
285355 n.hashCode ^
286356 presencePenalty.hashCode ^
@@ -294,6 +364,7 @@ class ChatOpenAIOptions extends ChatModelOptions {
294364 parallelToolCalls.hashCode ^
295365 serviceTier.hashCode ^
296366 user.hashCode ^
367+ verbosity.hashCode ^
297368 concurrencyLimit.hashCode;
298369 }
299370}
@@ -408,6 +479,33 @@ class ChatOpenAIJsonSchema {
408479 }
409480}
410481
/// Constrains effort on reasoning for reasoning models.
///
/// Supported values are `minimal`, `low`, `medium`, and `high`.
///
/// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort
enum ChatOpenAIReasoningEffort {
  /// Minimal reasoning effort.
  minimal,

  /// Low reasoning effort.
  low,

  /// Medium reasoning effort.
  medium,

  /// High reasoning effort.
  high,
}
496+
/// Constrains the verbosity of the model's response.
///
/// Lower values result in more concise responses, while higher values
/// result in more verbose responses.
///
/// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-verbosity
enum ChatOpenAIVerbosity {
  /// More concise responses.
  low,

  /// Medium verbosity responses.
  medium,

  /// More verbose responses.
  high,
}
508+
411509/// Specifies the latency tier to use for processing the request.
412510/// This is relevant for customers subscribed to the scale tier service.
413511enum ChatOpenAIServiceTier {
0 commit comments