
Commit e3d1767

Merge pull request #393 from neelvirdy/nvirdy/none-reasoning
2 parents 28884fc + 9fd5a2c commit e3d1767

5 files changed (+60 lines, −6 lines)

README.md
Lines changed: 4 additions & 1 deletion

@@ -1159,11 +1159,14 @@ Models are represented as a typealias `typealias Model = String`.
 
 ```swift
 public extension Model {
+    static let gpt5_1 = "gpt-5.1"
+    static let gpt5_1_chat_latest = "gpt-5.1-chat-latest"
+
     static let gpt5 = "gpt-5"
     static let gpt5_mini = "gpt-5-mini"
     static let gpt5_nano = "gpt-5-nano"
     static let gpt5_chat = "gpt-5-chat"
-
+
     static let gpt4_1 = "gpt-4.1"
     static let gpt4_1_mini = "gpt-4.1-mini"
     static let gpt4_1_nano = "gpt-4.1-nano"

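For orientation, here is a minimal usage sketch of the new constants (not part of the diff). It assumes only the `ChatQuery(messages:model:)` call shape that the test added later in this PR also exercises, and the prompt text is illustrative.

```swift
import OpenAI

// Sketch only: the new identifiers are plain `Model` (String) constants,
// so they slot into any API that accepts a model id.
let query = ChatQuery(
    messages: [
        .init(role: .user, content: "Who are you?")!
    ],
    model: .gpt5_1  // or .gpt5_1_chat_latest for the chat-tuned variant
)
```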
Sources/OpenAI/Public/Models/ChatQuery.swift
Lines changed: 6 additions & 1 deletion

@@ -15,7 +15,7 @@ public struct ChatQuery: Equatable, Codable, Streamable, Sendable {
     public let messages: [Self.ChatCompletionMessageParam]
     /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models.
     public let model: Model
-    /// Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are minimal, low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+    /// Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are none, minimal, low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
     ///
     /// - Note: o-series models only
     public let reasoningEffort: ReasoningEffort?

@@ -922,6 +922,7 @@ public struct ChatQuery: Equatable, Codable, Streamable, Sendable {
     }

     public enum ReasoningEffort: Codable, Equatable, Sendable {
+        case none
         case minimal
         case low
         case medium

@@ -935,6 +936,8 @@ public struct ChatQuery: Equatable, Codable, Streamable, Sendable {
         public func encode(to encoder: any Encoder) throws {
             var container = encoder.singleValueContainer()
             switch self {
+            case .none:
+                try container.encode("none")
             case .minimal:
                 try container.encode("minimal")
             case .low:

@@ -952,6 +955,8 @@ public struct ChatQuery: Equatable, Codable, Streamable, Sendable {
             let container = try decoder.singleValueContainer()
             let rawValue = try container.decode(String.self)
             switch rawValue {
+            case "none":
+                self = .none
             case "minimal":
                 self = .minimal
             case "low":

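One Swift-specific wrinkle worth noting for the new case: `reasoningEffort` is declared as `ReasoningEffort?`, so a bare `.none` at a call site resolves to `Optional.none` (the field is simply omitted from the request) rather than the new enum case, which is presumably why the test added later in this PR spells out the full type. A hedged sketch of the distinction, with illustrative prompts:

```swift
import OpenAI

// Bare `.none` in an Optional<ReasoningEffort> position means "no value":
// no reasoning_effort key is encoded at all.
let omitted = ChatQuery(
    messages: [.init(role: .user, content: "Hi")!],
    model: .gpt5_1,
    reasoningEffort: .none
)

// Fully qualifying the case encodes "reasoning_effort": "none" in the request body.
let explicit = ChatQuery(
    messages: [.init(role: .user, content: "Hi")!],
    model: .gpt5_1,
    reasoningEffort: ChatQuery.ReasoningEffort.none
)
```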
Sources/OpenAI/Public/Models/Models/Models.swift
Lines changed: 10 additions & 2 deletions

@@ -53,6 +53,14 @@ public extension Model {
     @available(*, deprecated, message: "On April 14th, 2025, developers were notified that the gpt-4.5-preview model is deprecated and will be removed from the API in the coming months. Recommended replacement: gpt-4.1")
     static let gpt4_5_preview = "gpt-4.5-preview"

+    // GPT-5.1
+
+    /// `gpt-5.1` Enhanced version of GPT-5 with improved reasoning and performance
+    static let gpt5_1 = "gpt-5.1"
+
+    /// `gpt-5.1-chat-latest` Latest GPT-5.1 model optimized for chat interactions
+    static let gpt5_1_chat_latest = "gpt-5.1-chat-latest"
+
     // GPT-5

     /// `gpt-5` OpenAI's best AI system with significant leap in intelligence, designed for logic and multi-step tasks with deep reasoning

@@ -261,7 +269,7 @@ public extension Model {
     // reasoning
     .o4_mini, o3, o3_mini, .o1,
     // flagship
-    .gpt5, .gpt5_mini, .gpt5_nano, .gpt5_chat, .gpt4_1, .gpt4_o, .gpt_4o_audio_preview, chatgpt_4o_latest,
+    .gpt5, .gpt5_mini, .gpt5_nano, .gpt5_chat, .gpt5_1, .gpt5_1_chat_latest, .gpt4_1, .gpt4_o, .gpt_4o_audio_preview, chatgpt_4o_latest,
     // cost-optimized
     .gpt4_1_mini, .gpt4_1_nano, .gpt4_o_mini, .gpt_4o_mini_audio_preview,
     // tool-specific

@@ -274,7 +282,7 @@ public extension Model {
     // reasoning
     .o4_mini, .o3, .o3_mini, .o1, .o1_pro,
     // flagship
-    .gpt5, .gpt5_mini, .gpt5_nano, .gpt5_chat, .gpt4_1, .gpt4_o, .chatgpt_4o_latest,
+    .gpt5, .gpt5_mini, .gpt5_nano, .gpt5_chat, .gpt5_1, .gpt5_1_chat_latest, .gpt4_1, .gpt4_o, .chatgpt_4o_latest,
     // cost-optimized
     .gpt4_1_mini, .gpt4_1_nano, .gpt4_o_mini,
     .gpt4_turbo, .gpt4, .gpt3_5Turbo,

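Since `Model` is a typealias for `String` (see the README snippet above), the new identifiers behave like ordinary string constants; the helper below is purely hypothetical and only illustrates that point.

```swift
import OpenAI

// Hypothetical convenience, not part of the library: group the two new ids.
func isGPT5_1Family(_ model: Model) -> Bool {
    model == Model.gpt5_1 || model == Model.gpt5_1_chat_latest
}

// Usage sketch
let usesNewFamily = isGPT5_1Family("gpt-5.1-chat-latest")  // true
```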
Sources/OpenAI/Public/Schemas/Generated/Components.swift
Lines changed: 2 additions & 1 deletion

@@ -4861,13 +4861,14 @@ public enum Components {
     ///
     /// Constrains effort on reasoning for
     /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
-    /// Currently supported values are `minimal`, `low`, `medium`, and `high`. Reducing
+    /// Currently supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
     /// reasoning effort can result in faster responses and fewer tokens used
     /// on reasoning in a response.
     ///
     ///
     /// - Remark: Generated from `#/components/schemas/ReasoningEffort`.
     @frozen public enum ReasoningEffort: String, Codable, Hashable, Sendable, CaseIterable {
+        case none = "none"
         case minimal = "minimal"
         case low = "low"
         case medium = "medium"

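Because the generated schema enum is `String`-backed and `CaseIterable`, the new case participates in raw-value construction and iteration like the existing ones; a minimal sketch (values per the doc comment above):

```swift
import OpenAI

// The wire value maps directly onto the raw value of the generated case.
let parsed = Components.Schemas.ReasoningEffort(rawValue: "none")    // Optional(.none)
let encodedValue = Components.Schemas.ReasoningEffort.none.rawValue  // "none"

// CaseIterable now includes the new case as well.
let allEfforts = Components.Schemas.ReasoningEffort.allCases.map(\.rawValue)
// e.g. ["none", "minimal", "low", "medium", "high"] (order follows declaration)
```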
Tests/OpenAITests/OpenAITestsDecoder.swift
Lines changed: 38 additions & 1 deletion

@@ -383,7 +383,44 @@ class OpenAITestsDecoder: XCTestCase {
         let decoded = try JSONDecoder().decode(Components.Schemas.Reasoning.self, from: data)
         XCTAssertEqual(decoded.effort, .minimal)
     }
-
+
+    func testChatQueryWithReasoningEffortNone() throws {
+        let chatQuery = ChatQuery(
+            messages: [
+                .init(role: .user, content: "Who are you?")!
+            ],
+            model: .gpt5_1,
+            reasoningEffort: ChatQuery.ReasoningEffort.none
+        )
+        let expectedValue = """
+        {
+            "model": "gpt-5.1",
+            "messages": [
+                {
+                    "role": "user",
+                    "content": "Who are you?"
+                }
+            ],
+            "reasoning_effort": "none",
+            "stream": false
+        }
+        """
+
+        let chatQueryAsDict = try jsonDataAsNSDictionary(JSONEncoder().encode(chatQuery))
+        let expectedValueAsDict = try jsonDataAsNSDictionary(expectedValue.data(using: .utf8)!)
+
+        XCTAssertEqual(chatQueryAsDict, expectedValueAsDict)
+    }
+
+    func testReasoningEffortDecodingNone() throws {
+        let json = """
+        { "effort": "none" }
+        """
+        let data = json.data(using: .utf8)!
+        let decoded = try JSONDecoder().decode(Components.Schemas.Reasoning.self, from: data)
+        XCTAssertEqual(decoded.effort, Components.Schemas.ReasoningEffort.none)
+    }
+
     func testEmbeddings() async throws {
         let data = """
         {
