Commit 208a2a8

Merge branch 'main' into nvirdy/verbosity
2 parents: 2ca651b + e3d1767

File tree: 5 files changed, +59 -5 lines changed

- README.md
- Sources/OpenAI/Public/Models/ChatQuery.swift
- Sources/OpenAI/Public/Models/Models/Models.swift
- Sources/OpenAI/Public/Schemas/Generated/Components.swift
- Tests/OpenAITests/OpenAITestsDecoder.swift

README.md

Lines changed: 4 additions & 1 deletion

@@ -1159,11 +1159,14 @@ Models are represented as a typealias `typealias Model = String`.
 
 ```swift
 public extension Model {
+    static let gpt5_1 = "gpt-5.1"
+    static let gpt5_1_chat_latest = "gpt-5.1-chat-latest"
+
     static let gpt5 = "gpt-5"
     static let gpt5_mini = "gpt-5-mini"
     static let gpt5_nano = "gpt-5-nano"
     static let gpt5_chat = "gpt-5-chat"
-
+
     static let gpt4_1 = "gpt-4.1"
     static let gpt4_1_mini = "gpt-4.1-mini"
     static let gpt4_1_nano = "gpt-4.1-nano"
Sources/OpenAI/Public/Models/ChatQuery.swift

Lines changed: 6 additions & 1 deletion

@@ -15,7 +15,7 @@ public struct ChatQuery: Equatable, Codable, Streamable, Sendable {
     public let messages: [Self.ChatCompletionMessageParam]
     /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
     public let model: Model
-    /// Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are minimal, low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+    /// Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are none, minimal, low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
     ///
     /// - Note: o-series models only
     public let reasoningEffort: ReasoningEffort?
@@ -922,6 +922,7 @@ public struct ChatQuery: Equatable, Codable, Streamable, Sendable {
     }

     public enum ReasoningEffort: Codable, Equatable, Sendable {
+        case none
         case minimal
         case low
         case medium
@@ -935,6 +936,8 @@ public struct ChatQuery: Equatable, Codable, Streamable, Sendable {
         public func encode(to encoder: any Encoder) throws {
             var container = encoder.singleValueContainer()
             switch self {
+            case .none:
+                try container.encode("none")
             case .minimal:
                 try container.encode("minimal")
             case .low:
@@ -952,6 +955,8 @@ public struct ChatQuery: Equatable, Codable, Streamable, Sendable {
             let container = try decoder.singleValueContainer()
             let rawValue = try container.decode(String.self)
             switch rawValue {
+            case "none":
+                self = .none
             case "minimal":
                 self = .minimal
             case "low":

Sources/OpenAI/Public/Models/Models/Models.swift

Lines changed: 10 additions & 2 deletions

@@ -53,6 +53,14 @@ public extension Model {
     @available(*, deprecated, message: "On April 14th, 2025, developers were notified that the gpt-4.5-preview model is deprecated and will be removed from the API in the coming months. Recommended replacement: gpt-4.1")
     static let gpt4_5_preview = "gpt-4.5-preview"

+    // GPT-5.1
+
+    /// `gpt-5.1` Enhanced version of GPT-5 with improved reasoning and performance
+    static let gpt5_1 = "gpt-5.1"
+
+    /// `gpt-5.1-chat-latest` Latest GPT-5.1 model optimized for chat interactions
+    static let gpt5_1_chat_latest = "gpt-5.1-chat-latest"
+
     // GPT-5

     /// `gpt-5` OpenAI's best AI system with significant leap in intelligence, designed for logic and multi-step tasks with deep reasoning
@@ -261,7 +269,7 @@ public extension Model {
         // reasoning
         .o4_mini, o3, o3_mini, .o1,
         // flagship
-        .gpt5, .gpt5_mini, .gpt5_nano, .gpt5_chat, .gpt4_1, .gpt4_o, .gpt_4o_audio_preview, chatgpt_4o_latest,
+        .gpt5, .gpt5_mini, .gpt5_nano, .gpt5_chat, .gpt5_1, .gpt5_1_chat_latest, .gpt4_1, .gpt4_o, .gpt_4o_audio_preview, chatgpt_4o_latest,
         // cost-optimized
         .gpt4_1_mini, .gpt4_1_nano, .gpt4_o_mini, .gpt_4o_mini_audio_preview,
         // tool-specific
@@ -274,7 +282,7 @@ public extension Model {
         // reasoning
         .o4_mini, .o3, .o3_mini, .o1, .o1_pro,
         // flagship
-        .gpt5, .gpt5_mini, .gpt5_nano, .gpt5_chat, .gpt4_1, .gpt4_o, .chatgpt_4o_latest,
+        .gpt5, .gpt5_mini, .gpt5_nano, .gpt5_chat, .gpt5_1, .gpt5_1_chat_latest, .gpt4_1, .gpt4_o, .chatgpt_4o_latest,
         // cost-optimized
         .gpt4_1_mini, .gpt4_1_nano, .gpt4_o_mini,
         .gpt4_turbo, .gpt4, .gpt3_5Turbo,
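
Since `Model` is a typealias for `String` (see the README excerpt above), the new constants are interchangeable with their raw identifiers; a quick sketch, not part of the commit:

```swift
import OpenAI

// Illustrative only: the new constants resolve to plain string identifiers.
let model: Model = .gpt5_1
assert(model == "gpt-5.1")
assert(Model.gpt5_1_chat_latest == "gpt-5.1-chat-latest")
```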

Sources/OpenAI/Public/Schemas/Generated/Components.swift

Lines changed: 2 additions & 1 deletion

@@ -4861,13 +4861,14 @@ public enum Components {
     ///
     /// Constrains effort on reasoning for
     /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
-    /// Currently supported values are `minimal`, `low`, `medium`, and `high`. Reducing
+    /// Currently supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
     /// reasoning effort can result in faster responses and fewer tokens used
     /// on reasoning in a response.
     ///
     ///
     /// - Remark: Generated from `#/components/schemas/ReasoningEffort`.
     @frozen public enum ReasoningEffort: String, Codable, Hashable, Sendable, CaseIterable {
+        case none = "none"
         case minimal = "minimal"
         case low = "low"
         case medium = "medium"

Tests/OpenAITests/OpenAITestsDecoder.swift

Lines changed: 37 additions & 0 deletions

@@ -418,6 +418,43 @@ class OpenAITestsDecoder: XCTestCase {
         XCTAssertNil(decodedNil.verbosity)
     }

+    func testChatQueryWithReasoningEffortNone() throws {
+        let chatQuery = ChatQuery(
+            messages: [
+                .init(role: .user, content: "Who are you?")!
+            ],
+            model: .gpt5_1,
+            reasoningEffort: ChatQuery.ReasoningEffort.none
+        )
+        let expectedValue = """
+        {
+            "model": "gpt-5.1",
+            "messages": [
+                {
+                    "role": "user",
+                    "content": "Who are you?"
+                }
+            ],
+            "reasoning_effort": "none",
+            "stream": false
+        }
+        """
+
+        let chatQueryAsDict = try jsonDataAsNSDictionary(JSONEncoder().encode(chatQuery))
+        let expectedValueAsDict = try jsonDataAsNSDictionary(expectedValue.data(using: .utf8)!)
+
+        XCTAssertEqual(chatQueryAsDict, expectedValueAsDict)
+    }
+
+    func testReasoningEffortDecodingNone() throws {
+        let json = """
+        { "effort": "none" }
+        """
+        let data = json.data(using: .utf8)!
+        let decoded = try JSONDecoder().decode(Components.Schemas.Reasoning.self, from: data)
+        XCTAssertEqual(decoded.effort, Components.Schemas.ReasoningEffort.none)
+    }
+
     func testEmbeddings() async throws {
         let data = """
         {
