diff --git a/Sources/OpenAI/Public/Models/Responses API/ResponseObject.swift b/Sources/OpenAI/Public/Models/Responses API/ResponseObject.swift
index 7e2bb9cc..a7e97abf 100644
--- a/Sources/OpenAI/Public/Models/Responses API/ResponseObject.swift
+++ b/Sources/OpenAI/Public/Models/Responses API/ResponseObject.swift
@@ -73,7 +73,7 @@ public struct ResponseObject: Codable, Equatable, Sendable {
     public let text: ResponseProperties.TextPayload

     /// How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call.
-    public let toolChoice: ResponseProperties.ToolChoicePayload
+    public let toolChoice: ResponseProperties.ToolChoicePayload?

     /// An array of tools the model may call while generating a response. You can specify which tool to use by setting the `tool_choice` parameter.
     ///
@@ -127,4 +127,31 @@ public struct ResponseObject: Codable, Equatable, Sendable {
         case usage
         case user
     }
+
+    public init(from decoder: Decoder) throws {
+        let container = try decoder.container(keyedBy: CodingKeys.self)
+        self.createdAt = try container.decode(Int.self, forKey: .createdAt)
+        self.error = try container.decodeIfPresent(Schemas.ResponseError.self, forKey: .error)
+        self.id = try container.decode(String.self, forKey: .id)
+        self.incompleteDetails = try container.decodeIfPresent(IncompleteDetails.self, forKey: .incompleteDetails)
+        self.instructions = try container.decodeIfPresent(String.self, forKey: .instructions)
+        self.maxOutputTokens = try container.decodeIfPresent(Int.self, forKey: .maxOutputTokens)
+        // Default to empty dictionary when metadata key is missing
+        self.metadata = try container.decodeIfPresent([String: String].self, forKey: .metadata) ?? [:]
+        self.model = try container.decode(String.self, forKey: .model)
+        self.object = try container.decode(String.self, forKey: .object)
+        self.output = try container.decode([OutputItem].self, forKey: .output)
+        self.parallelToolCalls = try container.decodeIfPresent(Bool.self, forKey: .parallelToolCalls) ?? false
+        self.previousResponseId = try container.decodeIfPresent(String.self, forKey: .previousResponseId)
+        self.reasoning = try container.decodeIfPresent(Schemas.Reasoning.self, forKey: .reasoning)
+        self.status = try container.decode(String.self, forKey: .status)
+        self.temperature = try container.decodeIfPresent(Double.self, forKey: .temperature)
+        self.text = try container.decodeIfPresent(ResponseProperties.TextPayload.self, forKey: .text) ?? ResponseProperties.TextPayload()
+        self.toolChoice = try container.decodeIfPresent(ResponseProperties.ToolChoicePayload.self, forKey: .toolChoice)
+        self.tools = try container.decodeIfPresent([Tool].self, forKey: .tools) ?? []
+        self.topP = try container.decodeIfPresent(Double.self, forKey: .topP)
+        self.truncation = try container.decodeIfPresent(String.self, forKey: .truncation)
+        self.usage = try container.decodeIfPresent(Schemas.ResponseUsage.self, forKey: .usage)
+        self.user = try container.decodeIfPresent(String.self, forKey: .user)
+    }
 }
diff --git a/Sources/OpenAI/Public/Schemas/Generated/Components.swift b/Sources/OpenAI/Public/Schemas/Generated/Components.swift
index 3874c357..769e2fd4 100644
--- a/Sources/OpenAI/Public/Schemas/Generated/Components.swift
+++ b/Sources/OpenAI/Public/Schemas/Generated/Components.swift
@@ -9278,7 +9278,7 @@ public enum Components {
             /// A detailed breakdown of the input tokens.
             ///
             /// - Remark: Generated from `#/components/schemas/ResponseUsage/input_tokens_details`.
-            public var inputTokensDetails: Components.Schemas.ResponseUsage.InputTokensDetailsPayload
+            public var inputTokensDetails: Components.Schemas.ResponseUsage.InputTokensDetailsPayload?
             /// The number of output tokens.
             ///
             /// - Remark: Generated from `#/components/schemas/ResponseUsage/output_tokens`.
@@ -9305,7 +9305,7 @@ public enum Components {
             /// A detailed breakdown of the output tokens.
             ///
             /// - Remark: Generated from `#/components/schemas/ResponseUsage/output_tokens_details`.
-            public var outputTokensDetails: Components.Schemas.ResponseUsage.OutputTokensDetailsPayload
+            public var outputTokensDetails: Components.Schemas.ResponseUsage.OutputTokensDetailsPayload?
            /// The total number of tokens used.
             ///
             /// - Remark: Generated from `#/components/schemas/ResponseUsage/total_tokens`.
@@ -11970,6 +11970,22 @@ public enum Components {
             public var annotations: [Components.Schemas.Annotation]
             /// - Remark: Generated from `#/components/schemas/OutputTextContent/logprobs`.
             public var logprobs: [Components.Schemas.LogProb]?
+
+            public enum CodingKeys: String, CodingKey {
+                case _type = "type"
+                case text
+                case annotations
+                case logprobs
+            }
+
+            public init(from decoder: any Decoder) throws {
+                let container = try decoder.container(keyedBy: CodingKeys.self)
+                self._type = try container.decode(Components.Schemas.OutputTextContent._TypePayload.self, forKey: ._type)
+                self.text = try container.decode(Swift.String.self, forKey: .text)
+                // Default to empty array if annotations are missing
+                self.annotations = try container.decodeIfPresent([Components.Schemas.Annotation].self, forKey: .annotations) ?? []
+                self.logprobs = try container.decodeIfPresent([Components.Schemas.LogProb].self, forKey: .logprobs)
+            }
             /// Creates a new `OutputTextContent`.
             ///
             /// - Parameters:
@@ -11988,12 +12004,6 @@ public enum Components {
                 self.annotations = annotations
                 self.logprobs = logprobs
             }
-            public enum CodingKeys: String, CodingKey {
-                case _type = "type"
-                case text
-                case annotations
-                case logprobs
-            }
         }
         /// A refusal from the model.
         ///
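
Taken together, the changes above make decoding tolerant of responses that omit certain fields: required keys still use `decode`, while keys the API may drop use `decodeIfPresent` and either stay optional or fall back to a default such as `[:]` or `[]`. As a minimal, self-contained sketch of that pattern (a hypothetical `Usage` type, not the library's actual model):

import Foundation

// Hypothetical type illustrating the decodeIfPresent-with-default pattern used in the diff.
struct Usage: Codable {
    let inputTokens: Int
    let metadata: [String: String]   // defaults to [:] when the key is absent
    let details: [String: Int]?      // stays nil when the key is absent

    enum CodingKeys: String, CodingKey {
        case inputTokens = "input_tokens"
        case metadata
        case details
    }

    init(from decoder: Decoder) throws {
        let container = try decoder.container(keyedBy: CodingKeys.self)
        inputTokens = try container.decode(Int.self, forKey: .inputTokens)
        // Missing keys no longer throw keyNotFound; they fall back to a default or nil.
        metadata = try container.decodeIfPresent([String: String].self, forKey: .metadata) ?? [:]
        details = try container.decodeIfPresent([String: Int].self, forKey: .details)
    }
}

// A payload that omits `metadata` and `details` now decodes instead of throwing.
let json = Data(#"{"input_tokens": 12}"#.utf8)
let usage = try JSONDecoder().decode(Usage.self, from: json)
print(usage.metadata.isEmpty, usage.details == nil) // true true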