diff --git a/FirebaseAI/Sources/GenerationConfig.swift b/FirebaseAI/Sources/GenerationConfig.swift index 8ff4e847d63..90d8eef7798 100644 --- a/FirebaseAI/Sources/GenerationConfig.swift +++ b/FirebaseAI/Sources/GenerationConfig.swift @@ -19,34 +19,34 @@ import Foundation @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *) public struct GenerationConfig: Sendable { /// Controls the degree of randomness in token selection. - let temperature: Float? + var temperature: Float? /// Controls diversity of generated text. - let topP: Float? + var topP: Float? /// Limits the number of highest probability words considered. - let topK: Int? + var topK: Int? /// The number of response variations to return. var candidateCount: Int? /// Maximum number of tokens that can be generated in the response. - let maxOutputTokens: Int? + var maxOutputTokens: Int? /// Controls the likelihood of repeating the same words or phrases already generated in the text. - let presencePenalty: Float? + var presencePenalty: Float? /// Controls the likelihood of repeating words, with the penalty increasing for each repetition. - let frequencyPenalty: Float? + var frequencyPenalty: Float? /// A set of up to 5 `String`s that will stop output generation. - let stopSequences: [String]? + var stopSequences: [String]? /// Output response MIME type of the generated candidate text. var responseMIMEType: String? /// Output schema of the generated candidate text. - let responseSchema: Schema? + var responseSchema: Schema? /// Output schema of the generated response in [JSON Schema](https://json-schema.org/) format. /// @@ -57,7 +57,7 @@ public struct GenerationConfig: Sendable { var responseModalities: [ResponseModality]? /// Configuration for controlling the "thinking" behavior of compatible Gemini models. - let thinkingConfig: ThinkingConfig? + var thinkingConfig: ThinkingConfig? /// Creates a new `GenerationConfig` value. 
/// @@ -203,6 +203,79 @@ public struct GenerationConfig: Sendable { self.responseModalities = responseModalities self.thinkingConfig = thinkingConfig } + + /// Merges two configurations, giving precedence to values found in the `overrides` parameter. + /// + /// - Parameters: + /// - base: The foundational configuration (e.g., model-level defaults). + /// - overrides: The configuration containing values that should supersede the base (e.g., + /// request-level specific settings). + /// - Returns: A merged `GenerationConfig` prioritizing `overrides`, or `nil` if both inputs are + /// `nil`. + static func merge(_ base: GenerationConfig?, + with overrides: GenerationConfig?) -> GenerationConfig? { + // 1. If the base config is missing, return the overrides (which might be nil). + guard let baseConfig = base else { + return overrides + } + + // 2. If overrides are missing, strictly return the base. + guard let overrideConfig = overrides else { + return baseConfig + } + + // 3. Start with a copy of the base config. + var config = baseConfig + + // 4. Overwrite with any non-nil values found in the overrides. + config.temperature = overrideConfig.temperature ?? config.temperature + config.topP = overrideConfig.topP ?? config.topP + config.topK = overrideConfig.topK ?? config.topK + config.candidateCount = overrideConfig.candidateCount ?? config.candidateCount + config.maxOutputTokens = overrideConfig.maxOutputTokens ?? config.maxOutputTokens + config.presencePenalty = overrideConfig.presencePenalty ?? config.presencePenalty + config.frequencyPenalty = overrideConfig.frequencyPenalty ?? config.frequencyPenalty + config.stopSequences = overrideConfig.stopSequences ?? config.stopSequences + config.responseMIMEType = overrideConfig.responseMIMEType ?? config.responseMIMEType + config.responseModalities = overrideConfig.responseModalities ?? config.responseModalities + config.thinkingConfig = overrideConfig.thinkingConfig ?? config.thinkingConfig + + // 5. 
Handle Schema mutual exclusivity with precedence for `responseJSONSchema`. + if let responseJSONSchema = overrideConfig.responseJSONSchema { + config.responseJSONSchema = responseJSONSchema + config.responseSchema = nil + } else if let responseSchema = overrideConfig.responseSchema { + config.responseSchema = responseSchema + config.responseJSONSchema = nil + } + + return config + } + + /// Merges configurations and explicitly enforces settings required for JSON structured output. + /// + /// - Parameters: + /// - base: The foundational configuration (e.g., model defaults). + /// - overrides: The configuration containing overrides (e.g., request specific). + /// - jsonSchema: The JSON schema to enforce on the output. + /// - Returns: A non-nil `GenerationConfig` with the merged values and JSON constraints applied. + static func merge(_ base: GenerationConfig?, + with overrides: GenerationConfig?, + enforcingJSONSchema jsonSchema: JSONSchema) -> GenerationConfig { + // 1. Merge base and overrides, defaulting to a fresh config if both are nil. + var config = GenerationConfig.merge(base, with: overrides) ?? GenerationConfig() + + // 2. Enforce the specific constraints for JSON Schema generation. + config.responseMIMEType = "application/json" + config.responseJSONSchema = jsonSchema + config.responseSchema = nil // Clear conflicting legacy schema + + // 3. Clear incompatible or conflicting options. 
+ config.candidateCount = nil // Structured output typically requires default candidate behaviour + config.responseModalities = nil // Ensure text-only output for JSON + + return config + } } // MARK: - Codable Conformances diff --git a/FirebaseAI/Sources/GenerativeModel.swift b/FirebaseAI/Sources/GenerativeModel.swift index ec0f570bfe2..a9f8751c469 100644 --- a/FirebaseAI/Sources/GenerativeModel.swift +++ b/FirebaseAI/Sources/GenerativeModel.swift @@ -146,49 +146,6 @@ public final class GenerativeModel: Sendable { return try await generateContent(content, generationConfig: generationConfig) } - func generateContent(_ content: [ModelContent], - generationConfig: GenerationConfig?) async throws - -> GenerateContentResponse { - try content.throwIfError() - let response: GenerateContentResponse - let generateContentRequest = GenerateContentRequest( - model: modelResourceName, - contents: content, - generationConfig: generationConfig, - safetySettings: safetySettings, - tools: tools, - toolConfig: toolConfig, - systemInstruction: systemInstruction, - apiConfig: apiConfig, - apiMethod: .generateContent, - options: requestOptions - ) - do { - response = try await generativeAIService.loadRequest(request: generateContentRequest) - } catch { - throw GenerativeModel.generateContentError(from: error) - } - - // Check the prompt feedback to see if the prompt was blocked. - if response.promptFeedback?.blockReason != nil { - throw GenerateContentError.promptBlocked(response: response) - } - - // Check to see if an error should be thrown for stop reason. 
- if let reason = response.candidates.first?.finishReason, reason != .stop { - throw GenerateContentError.responseStoppedEarly(reason: reason, response: response) - } - - // If all candidates are empty (contain no information that a developer could act on) then throw - if response.candidates.allSatisfy({ $0.isEmpty }) { - throw GenerateContentError.internalError(underlying: InvalidCandidateError.emptyContent( - underlyingError: Candidate.EmptyContentError() - )) - } - - return response - } - /// Generates content from String and/or image inputs, given to the model as a prompt, that are /// representable as one or more ``Part``s. /// @@ -218,122 +175,78 @@ public final class GenerativeModel: Sendable { public func generateContentStream(_ content: [ModelContent]) throws -> AsyncThrowingStream { try content.throwIfError() - let generateContentRequest = GenerateContentRequest( - model: modelResourceName, - contents: content, - generationConfig: generationConfig, - safetySettings: safetySettings, - tools: tools, - toolConfig: toolConfig, - systemInstruction: systemInstruction, - apiConfig: apiConfig, - apiMethod: .streamGenerateContent, - options: requestOptions - ) - - return AsyncThrowingStream { continuation in - let responseStream = generativeAIService.loadRequestStream(request: generateContentRequest) - Task { - do { - var didYieldResponse = false - for try await response in responseStream { - // Check the prompt feedback to see if the prompt was blocked. - if response.promptFeedback?.blockReason != nil { - throw GenerateContentError.promptBlocked(response: response) - } - - // If the stream ended early unexpectedly, throw an error. - if let finishReason = response.candidates.first?.finishReason, finishReason != .stop { - throw GenerateContentError.responseStoppedEarly( - reason: finishReason, - response: response - ) - } - // Skip returning the response if all candidates are empty (i.e., they contain no - // information that a developer could act on). 
- if response.candidates.allSatisfy({ $0.isEmpty }) { - AILog.log( - level: .debug, - code: .generateContentResponseEmptyCandidates, - "Skipped response with all empty candidates: \(response)" - ) - } else { - continuation.yield(response) - didYieldResponse = true - } - } - - // Throw an error if all responses were skipped due to empty content. - if didYieldResponse { - continuation.finish() - } else { - continuation.finish(throwing: GenerativeModel.generateContentError( - from: InvalidCandidateError.emptyContent( - underlyingError: Candidate.EmptyContentError() - ) - )) - } - } catch { - continuation.finish(throwing: GenerativeModel.generateContentError(from: error)) - return - } - } - } + return try generateContentStream(content, generationConfig: generationConfig) } - public func generate(_ type: Content.Type, - from parts: any PartsRepresentable...) async throws - -> Response where Content: FirebaseGenerable { - var generationConfig = self.generationConfig ?? GenerationConfig() - generationConfig.candidateCount = nil - generationConfig.responseMIMEType = "application/json" - generationConfig.responseJSONSchema = type.jsonSchema - generationConfig.responseModalities = nil - - let response: GenerateContentResponse - do { - response = try await generateContent( - [ModelContent(parts: parts)], generationConfig: generationConfig + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @discardableResult + public final nonisolated(nonsending) + func respond(to prompt: any PartsRepresentable, options: GenerationConfig? 
= nil) + async throws -> GenerativeModel.Response { + return try await respond( + to: prompt, + generating: String.self, + schema: nil, + includeSchemaInPrompt: false, + options: options ) - } catch let error as GenerateContentError { - throw GenerationError.generationFailure(error) - } catch { - throw GenerationError.generationFailure(GenerateContentError.internalError(underlying: error)) } - // TODO: Add `GenerateContentResponse` as context in errors. - - guard let jsonText = response.text else { - throw GenerationError.decodingFailure( - GenerationError.Context(debugDescription: "No JSON text in response.") + @discardableResult + public final nonisolated(nonsending) + func respond(to prompt: any PartsRepresentable, schema: JSONSchema, + includeSchemaInPrompt: Bool = true, options: GenerationConfig? = nil) + async throws -> GenerativeModel.Response { + return try await respond( + to: prompt, + generating: ModelOutput.self, + schema: schema, + includeSchemaInPrompt: includeSchemaInPrompt, + options: options ) } - let modelOutput: ModelOutput - do { - modelOutput = try ModelOutput(json: jsonText) - } catch let error as GenerationError { - throw error - } catch { - throw GenerationError.decodingFailure( - GenerationError.Context(debugDescription: "Failed to decode response JSON: \(jsonText)") + @discardableResult + public final nonisolated(nonsending) + func respond(to prompt: any PartsRepresentable, + generating type: Content.Type = Content.self, + includeSchemaInPrompt: Bool = true, options: GenerationConfig? 
= nil) + async throws -> GenerativeModel.Response where Content: FirebaseGenerable { + return try await respond( + to: prompt, + generating: type, + schema: type.jsonSchema, + includeSchemaInPrompt: includeSchemaInPrompt, + options: options ) } - let content: Content - do { - content = try Content(modelOutput) - } catch let error as GenerationError { - throw error - } catch { - throw GenerationError.decodingFailure( - GenerationError.Context(debugDescription: "Failed to decode \(type) from: \(modelOutput)") - ) + public final func streamResponse(to prompt: any PartsRepresentable, + options: GenerationConfig? = nil) + -> sending GenerativeModel.ResponseStream { + return streamResponse(to: prompt, generating: String.self, schema: nil, + includeSchemaInPrompt: false, options: options) } - return Response(content: content, rawContent: modelOutput, rawResponse: response) - } + public final func streamResponse(to prompt: any PartsRepresentable, schema: JSONSchema, + includeSchemaInPrompt: Bool = true, + options: GenerationConfig? = nil) + -> sending GenerativeModel.ResponseStream { + return streamResponse(to: prompt, generating: ModelOutput.self, schema: schema, + includeSchemaInPrompt: includeSchemaInPrompt, options: options) + } + + public final func streamResponse(to prompt: any PartsRepresentable, + generating type: Content.Type = Content.self, + includeSchemaInPrompt: Bool = true, + options: GenerationConfig? = nil) + -> sending GenerativeModel.ResponseStream where Content: FirebaseGenerable { + return streamResponse(to: prompt, generating: type, schema: type.jsonSchema, + includeSchemaInPrompt: includeSchemaInPrompt, options: options) + } + #endif // compiler(>=6.2) /// Creates a new chat conversation using this model with the provided history. 
public func startChat(history: [ModelContent] = []) -> Chat { @@ -425,15 +338,209 @@ public final class GenerativeModel: Sendable { return GenerateContentError.internalError(underlying: error) } - public struct Response where Content: FirebaseGenerable { - public let content: Content - public let rawContent: ModelOutput - public let rawResponse: GenerateContentResponse + // MARK: - Internal Helpers - init(content: Content, rawContent: ModelOutput, rawResponse: GenerateContentResponse) { - self.content = content - self.rawContent = rawContent - self.rawResponse = rawResponse + func generateContent(_ content: [ModelContent], + generationConfig: GenerationConfig?) async throws + -> GenerateContentResponse { + try content.throwIfError() + let response: GenerateContentResponse + let generateContentRequest = GenerateContentRequest( + model: modelResourceName, + contents: content, + generationConfig: generationConfig, + safetySettings: safetySettings, + tools: tools, + toolConfig: toolConfig, + systemInstruction: systemInstruction, + apiConfig: apiConfig, + apiMethod: .generateContent, + options: requestOptions + ) + do { + response = try await generativeAIService.loadRequest(request: generateContentRequest) + } catch { + throw GenerativeModel.generateContentError(from: error) + } + + // Check the prompt feedback to see if the prompt was blocked. + if response.promptFeedback?.blockReason != nil { + throw GenerateContentError.promptBlocked(response: response) + } + + // Check to see if an error should be thrown for stop reason. 
+ if let reason = response.candidates.first?.finishReason, reason != .stop { + throw GenerateContentError.responseStoppedEarly(reason: reason, response: response) + } + + // If all candidates are empty (contain no information that a developer could act on) then throw + if response.candidates.allSatisfy({ $0.isEmpty }) { + throw GenerateContentError.internalError(underlying: InvalidCandidateError.emptyContent( + underlyingError: Candidate.EmptyContentError() + )) } + + return response } + + func generateContentStream(_ content: [ModelContent], + generationConfig: GenerationConfig?) throws + -> AsyncThrowingStream { + try content.throwIfError() + let generateContentRequest = GenerateContentRequest( + model: modelResourceName, + contents: content, + generationConfig: generationConfig, + safetySettings: safetySettings, + tools: tools, + toolConfig: toolConfig, + systemInstruction: systemInstruction, + apiConfig: apiConfig, + apiMethod: .streamGenerateContent, + options: requestOptions + ) + + return AsyncThrowingStream { continuation in + let responseStream = generativeAIService.loadRequestStream(request: generateContentRequest) + Task { + do { + var didYieldResponse = false + for try await response in responseStream { + // Check the prompt feedback to see if the prompt was blocked. + if response.promptFeedback?.blockReason != nil { + throw GenerateContentError.promptBlocked(response: response) + } + + // If the stream ended early unexpectedly, throw an error. + if let finishReason = response.candidates.first?.finishReason, finishReason != .stop { + throw GenerateContentError.responseStoppedEarly( + reason: finishReason, + response: response + ) + } + + // Skip returning the response if all candidates are empty (i.e., they contain no + // information that a developer could act on). 
+ if response.candidates.allSatisfy({ $0.isEmpty }) { + AILog.log( + level: .debug, + code: .generateContentResponseEmptyCandidates, + "Skipped response with all empty candidates: \(response)" + ) + } else { + continuation.yield(response) + didYieldResponse = true + } + } + + // Throw an error if all responses were skipped due to empty content. + if didYieldResponse { + continuation.finish() + } else { + continuation.finish(throwing: GenerativeModel.generateContentError( + from: InvalidCandidateError.emptyContent( + underlyingError: Candidate.EmptyContentError() + ) + )) + } + } catch { + continuation.finish(throwing: GenerativeModel.generateContentError(from: error)) + return + } + } + } + } + + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + final nonisolated(nonsending) + func respond(to prompt: any PartsRepresentable, generating type: Content.Type, + schema: JSONSchema?, includeSchemaInPrompt: Bool, + options: GenerationConfig?) + async throws -> GenerativeModel.Response where Content: FirebaseGenerable { + let parts = [ModelContent(parts: prompt)] + + let generationConfig: GenerationConfig? 
+ if let schema { + generationConfig = GenerationConfig.merge( + self.generationConfig, with: options, enforcingJSONSchema: schema + ) + } else { + generationConfig = GenerationConfig.merge(self.generationConfig, with: options) + } + + do { + let response = try await generateContent(parts, generationConfig: generationConfig) + guard let text = response.text else { + throw GenerationError.decodingFailure(.init(debugDescription: "No text in response.")) + } + let responseID = response.responseID.map { ResponseID(responseID: $0) } + let modelOutput: ModelOutput + if schema == nil { + modelOutput = ModelOutput(kind: .string(text), id: responseID, isComplete: true) + } else { + modelOutput = try ModelOutput(json: text, id: responseID, streaming: false) + } + return try GenerativeModel.Response( + content: Content(modelOutput), + rawContent: modelOutput, + rawResponse: response + ) + } catch let error as GenerationError { + throw error + } catch let error as GenerateContentError { + throw GenerationError.generationFailure(error) + } catch { + throw GenerationError.generationFailure( + GenerateContentError.internalError(underlying: error) + ) + } + // TODO: Add `GenerateContentResponse` as context in errors. + } + + final func streamResponse(to prompt: any PartsRepresentable, + generating type: Content.Type, schema: JSONSchema?, + includeSchemaInPrompt: Bool, options: GenerationConfig?) + -> sending GenerativeModel.ResponseStream where Content: FirebaseGenerable { + let parts = [ModelContent(parts: prompt)] + + let generationConfig: GenerationConfig? 
+ if let schema { + generationConfig = GenerationConfig.merge( + self.generationConfig, with: options, enforcingJSONSchema: schema + ) + } else { + generationConfig = GenerationConfig.merge(self.generationConfig, with: options) + } + + return GenerativeModel.ResponseStream { context in + do { + let stream = try self.generateContentStream(parts, generationConfig: generationConfig) + var json = "" + for try await response in stream { + if let text = response.text { + json += text + let responseID = response.responseID.map { ResponseID(responseID: $0) } + let modelOutput = try ModelOutput(json: json, id: responseID, streaming: true) + try await context.yield( + GenerativeModel.ResponseStream.Snapshot( + content: Content.Partial(modelOutput), + rawContent: modelOutput, + rawResponse: response + ) + ) + } + } + await context.finish() + } catch let error as GenerateContentError { + await context.finish(throwing: GenerationError.generationFailure(error)) + } catch { + await context.finish(throwing: GenerationError.generationFailure( + GenerateContentError.internalError(underlying: error) + )) + } + // TODO: Add `GenerateContentResponse` as context in errors. + } + } + #endif // compiler(>=6.2) } diff --git a/FirebaseAI/Sources/Types/Public/StructuredOutput/GenerativeModel+Response.swift b/FirebaseAI/Sources/Types/Public/StructuredOutput/GenerativeModel+Response.swift new file mode 100644 index 00000000000..6ecb35eadf1 --- /dev/null +++ b/FirebaseAI/Sources/Types/Public/StructuredOutput/GenerativeModel+Response.swift @@ -0,0 +1,31 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version.
#if compiler(>=6.2)
  @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  public extension GenerativeModel {
    /// A structured-output response decoded into a ``FirebaseGenerable`` value.
    struct Response<Content> where Content: FirebaseGenerable {
      /// The decoded content generated by the model.
      public let content: Content
      /// The raw structured output that `content` was decoded from.
      public let rawContent: ModelOutput
      /// The complete network response returned by the backend.
      public let rawResponse: GenerateContentResponse

      init(content: Content, rawContent: ModelOutput, rawResponse: GenerateContentResponse) {
        self.content = content
        self.rawContent = rawContent
        self.rawResponse = rawResponse
      }
    }
  }

  @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  public extension GenerativeModel {
    /// An asynchronous sequence of partially-decoded structured-output snapshots.
    struct ResponseStream<Content>: AsyncSequence, Sendable
      where Content: FirebaseGenerable & Sendable, Content.Partial: Sendable {
      public typealias Element = Snapshot
      public typealias AsyncIterator = AsyncThrowingStream<Snapshot, Error>.Iterator

      private let _stream: AsyncThrowingStream<Snapshot, Error>
      private let _context: StreamContext

      /// One partial view of the response as it is being generated.
      public struct Snapshot: Sendable {
        /// The partially-decoded content accumulated so far.
        public let content: Content.Partial
        /// The raw structured output backing `content`.
        public let rawContent: ModelOutput
        /// The network response that produced this snapshot.
        public let rawResponse: GenerateContentResponse
      }

      /// Creates a stream whose elements are produced by `builder` via a `StreamContext`.
      init(_ builder: @escaping @Sendable (StreamContext) async -> Void) {
        // `AsyncThrowingStream`'s build closure runs synchronously, so the continuation is
        // guaranteed to be assigned before it is read below.
        var pendingContinuation: AsyncThrowingStream<Snapshot, Error>.Continuation!
        _stream = AsyncThrowingStream(Snapshot.self) { pendingContinuation = $0 }

        let context = StreamContext(continuation: pendingContinuation)
        _context = context

        Task {
          await builder(context)
        }
      }

      public func makeAsyncIterator() -> AsyncIterator {
        return _stream.makeAsyncIterator()
      }

      /// Suspends until the stream finishes, then returns the final fully-decoded response.
      public nonisolated(nonsending) func collect()
        async throws -> sending GenerativeModel.Response<Content> {
        let outcome = try await _context.value
        return try GenerativeModel.Response(
          content: Content(outcome.rawContent),
          rawContent: outcome.rawContent,
          rawResponse: outcome.rawResponse
        )
      }
    }
  }

  @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  extension GenerativeModel.ResponseStream {
    /// Coordinates snapshot delivery to stream consumers and records the final outcome so that
    /// `collect()` can await it.
    actor StreamContext {
      /// The raw data from the most recent snapshot, retained as the stream's final result.
      struct RawResult: Sendable {
        let rawContent: ModelOutput
        let rawResponse: GenerateContentResponse
      }

      private let continuation: AsyncThrowingStream<Snapshot, Error>.Continuation
      // The stream's outcome once finished; `nil` while the stream is still producing.
      private var _finalResult: Result<RawResult, Error>?
      // Callers of `value` that suspended before the stream finished.
      private var _waitingContinuations: [CheckedContinuation<RawResult, Error>] = []
      // The most recent snapshot's raw data; promoted to the final result on success.
      private var _latestRaw: RawResult?

      init(continuation: AsyncThrowingStream<Snapshot, Error>.Continuation) {
        self.continuation = continuation
      }

      /// Forwards `snapshot` to consumers and remembers its raw data as the latest result.
      func yield(_ snapshot: Snapshot) {
        _latestRaw = RawResult(rawContent: snapshot.rawContent, rawResponse: snapshot.rawResponse)
        continuation.yield(snapshot)
      }

      /// Ends the stream successfully.
      func finish() {
        continuation.finish()
        finalize(with: nil)
      }

      /// Ends the stream with `error`.
      func finish(throwing error: Error) {
        continuation.finish(throwing: error)
        finalize(with: error)
      }

      /// The final raw result, suspending until the stream finishes if necessary.
      var value: RawResult {
        get async throws {
          if let settled = _finalResult {
            return try settled.get()
          }
          return try await withCheckedThrowingContinuation { waiter in
            _waitingContinuations.append(waiter)
          }
        }
      }

      /// Settles the final result and resumes every suspended `value` caller exactly once.
      private func finalize(with error: Error?) {
        let outcome: Result<RawResult, Error>
        if let error {
          outcome = .failure(error)
        } else if let last = _latestRaw {
          outcome = .success(last)
        } else {
          // Finishing without ever yielding means there is nothing for `collect()` to return.
          outcome = .failure(ResponseStreamError.noContentGenerated)
        }

        _finalResult = outcome

        for waiter in _waitingContinuations {
          waiter.resume(with: outcome)
        }
        _waitingContinuations.removeAll()
      }
    }

    enum ResponseStreamError: Error {
      /// Thrown when `collect()` is called on a stream that finishes without producing any
      /// snapshots.
      case noContentGenerated
    }
  }
#endif // compiler(>=6.2)
= nil) throws { - guard let jsonData = json.data(using: .utf8) else { - fatalError() + init(json: String, id: ResponseID? = nil, streaming: Bool?) throws { + var modelOutput: ModelOutput + var decodingError: Error? + + // 1. Attempt to decode the JSON with the standard `JSONDecoder` since it likely offers the best + // performance and is available on iOS 15+. + // Note: This approach does not support decoding partial JSON when streaming. As an + // optimization, this approach is skipped when `streaming` is explicitly set to `true`. + if streaming != true { + guard let jsonData = json.data(using: .utf8) else { + throw GenerativeModel.GenerationError.decodingFailure( + GenerativeModel.GenerationError.Context( + debugDescription: "Failed to convert JSON to `Data`: \(json)" + ) + ) + } + do { + let jsonValue = try JSONDecoder().decode(JSONValue.self, from: jsonData) + modelOutput = jsonValue.modelOutput + modelOutput.id = id + + self = modelOutput + + return + } catch { + decodingError = error + } } - let jsonValue = try JSONDecoder().decode(JSONValue.self, from: jsonData) - var modelOutput = jsonValue.modelOutput - modelOutput.id = id + // 2. Attempt to decode using `GeneratedContent` from Foundation Models when available. It is + // designed to handle streaming JSON. + #if canImport(FoundationModels) + if #available(iOS 26.0, macOS 26.0, visionOS 26.0, *) { + do { + let generatedContent = try GeneratedContent(json: json) + modelOutput = generatedContent.modelOutput + modelOutput.id = id + + self = modelOutput + + return + } catch { + decodingError = error + } + } + #endif // canImport(FoundationModels) + + // 3. Fallback to decoding with a custom `StreamingJSONParser` when `GeneratedContent` is not + // available. + // TODO: Add a fallback streaming JSON parser + + // 4. Throw a decoding error if all attempts to decode the JSON have failed. 
+ if let decodingError { + throw decodingError + } else { + throw GenerativeModel.GenerationError.decodingFailure( + GenerativeModel.GenerationError.Context(debugDescription: "Failed to decode JSON: \(json)") + ) + } + } - self = modelOutput + public init(json: String) throws { + // Since it's unknown if the JSON is partial (for streaming), disable the optimizations by + // specifying `streaming: nil`. + try self.init(json: json, id: nil, streaming: nil) } public func value(_ type: Value.Type = Value.self) throws -> Value diff --git a/FirebaseAI/Tests/TestApp/Tests/Integration/GenerateContentIntegrationTests.swift b/FirebaseAI/Tests/TestApp/Tests/Integration/GenerateContentIntegrationTests.swift index ed24e083a41..d9ac7dd7ec9 100644 --- a/FirebaseAI/Tests/TestApp/Tests/Integration/GenerateContentIntegrationTests.swift +++ b/FirebaseAI/Tests/TestApp/Tests/Integration/GenerateContentIntegrationTests.swift @@ -112,6 +112,74 @@ struct GenerateContentIntegrationTests { usageMetadata.thoughtsTokenCount)) } + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test(arguments: [ + (InstanceConfig.vertexAI_v1beta, ModelNames.gemini2FlashLite), + (InstanceConfig.vertexAI_v1beta_global, ModelNames.gemini2FlashLite), + (InstanceConfig.vertexAI_v1beta_global_appCheckLimitedUse, ModelNames.gemini2FlashLite), + (InstanceConfig.googleAI_v1beta, ModelNames.gemini2FlashLite), + (InstanceConfig.googleAI_v1beta_appCheckLimitedUse, ModelNames.gemini2FlashLite), + (InstanceConfig.googleAI_v1beta, ModelNames.gemini3FlashPreview), + (InstanceConfig.googleAI_v1beta_appCheckLimitedUse, ModelNames.gemini3FlashPreview), + (InstanceConfig.googleAI_v1beta, ModelNames.gemma3_4B), + (InstanceConfig.googleAI_v1beta_freeTier, ModelNames.gemma3_4B), + // Note: The following configs are commented out for easy one-off manual testing. 
+ // (InstanceConfig.googleAI_v1beta_freeTier, ModelNames.gemini2FlashLite), + // (InstanceConfig.googleAI_v1beta_staging, ModelNames.gemini2FlashLite), + // (InstanceConfig.googleAI_v1beta_staging, ModelNames.gemma3_4B), + // (InstanceConfig.vertexAI_v1beta_staging, ModelNames.gemini2FlashLite), + // (InstanceConfig.googleAI_v1beta_freeTier_bypassProxy, ModelNames.gemini2FlashLite), + // (InstanceConfig.googleAI_v1beta_freeTier_bypassProxy, ModelNames.gemma3_4B), + ]) + func respondWithString(_ config: InstanceConfig, modelName: String) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: modelName, + generationConfig: generationConfig, + safetySettings: safetySettings, + ) + let prompt = "Where is Google headquarters located? Answer with the city name only." + + let response = try await model.respond(to: prompt) + + let text = response.content.trimmingCharacters(in: .whitespacesAndNewlines) + #expect(text == "Mountain View") + + let usageMetadata = try #require(response.rawResponse.usageMetadata) + #expect(usageMetadata.promptTokenCount.isEqual(to: 13, accuracy: tokenCountAccuracy)) + #expect(usageMetadata.promptTokensDetails.count == 1) + let promptTokensDetails = try #require(usageMetadata.promptTokensDetails.first) + #expect(promptTokensDetails.modality == .text) + #expect(promptTokensDetails.tokenCount == usageMetadata.promptTokenCount) + if modelName.hasPrefix("gemini-3") { + // For gemini-3 models, the thoughtsTokenCount can vary slightly between runs. + #expect(usageMetadata.thoughtsTokenCount >= 64) + } else { + #expect(usageMetadata.thoughtsTokenCount == 0) + } + // The fields `candidatesTokenCount` and `candidatesTokensDetails` are not included when using + // Gemma models. 
+ if modelName.hasPrefix("gemini-3") { + #expect(usageMetadata.candidatesTokenCount == 2) + #expect(usageMetadata.candidatesTokensDetails.isEmpty) + } else if modelName.hasPrefix("gemma") { + #expect(usageMetadata.candidatesTokenCount == 0) + #expect(usageMetadata.candidatesTokensDetails.isEmpty) + } else { + #expect(usageMetadata.candidatesTokenCount.isEqual(to: 3, accuracy: tokenCountAccuracy)) + #expect(usageMetadata.candidatesTokensDetails.count == 1) + let candidatesTokensDetails = try #require(usageMetadata.candidatesTokensDetails.first) + #expect(candidatesTokensDetails.modality == .text) + #expect(candidatesTokensDetails.tokenCount == usageMetadata.candidatesTokenCount) + } + #expect(usageMetadata.cachedContentTokenCount == 0) + #expect(usageMetadata.cacheTokensDetails.isEmpty) + #expect(usageMetadata.totalTokenCount == (usageMetadata.promptTokenCount + + usageMetadata.candidatesTokenCount + + usageMetadata.thoughtsTokenCount)) + } + #endif // compiler(>=6.2) + @Test( "Generate an enum and provide a system instruction", arguments: InstanceConfig.allConfigs @@ -151,6 +219,47 @@ struct GenerateContentIntegrationTests { #expect(candidatesTokensDetails.tokenCount == usageMetadata.candidatesTokenCount) } + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test( + "Generate an enum and provide a system instruction", + arguments: InstanceConfig.allConfigs + ) + func respondWithEnum(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2FlashLite, + safetySettings: safetySettings, + tools: [], + toolConfig: .init(functionCallingConfig: .none()), + systemInstruction: ModelContent(role: "system", parts: "Always pick blue.") + ) + let prompt = "What is your favourite colour?" 
+ + let response = try await model.respond(to: prompt, options: GenerationConfig( + responseMIMEType: "text/x.enum", + responseSchema: .enumeration(values: ["Red", "Green", "Blue"]) + )) + + let text = response.content.trimmingCharacters(in: .whitespacesAndNewlines) + #expect(text == "Blue") + + let usageMetadata = try #require(response.rawResponse.usageMetadata) + #expect(usageMetadata.promptTokenCount.isEqual(to: 15, accuracy: tokenCountAccuracy)) + #expect(usageMetadata.candidatesTokenCount.isEqual(to: 1, accuracy: tokenCountAccuracy)) + #expect(usageMetadata.thoughtsTokenCount == 0) + #expect(usageMetadata.totalTokenCount + == usageMetadata.promptTokenCount + usageMetadata.candidatesTokenCount) + #expect(usageMetadata.promptTokensDetails.count == 1) + let promptTokensDetails = try #require(usageMetadata.promptTokensDetails.first) + #expect(promptTokensDetails.modality == .text) + #expect(promptTokensDetails.tokenCount == usageMetadata.promptTokenCount) + #expect(usageMetadata.candidatesTokensDetails.count == 1) + let candidatesTokensDetails = try #require(usageMetadata.candidatesTokensDetails.first) + #expect(candidatesTokensDetails.modality == .text) + #expect(candidatesTokensDetails.tokenCount == usageMetadata.candidatesTokenCount) + } + #endif // compiler(>=6.2) + @Test( arguments: [ (.vertexAI_v1beta, ModelNames.gemini2_5_Flash, ThinkingConfig(thinkingBudget: 0)), diff --git a/FirebaseAI/Tests/TestApp/Tests/Integration/SchemaTests.swift b/FirebaseAI/Tests/TestApp/Tests/Integration/SchemaTests.swift index 284e30180ad..68c138e9d71 100644 --- a/FirebaseAI/Tests/TestApp/Tests/Integration/SchemaTests.swift +++ b/FirebaseAI/Tests/TestApp/Tests/Integration/SchemaTests.swift @@ -78,27 +78,30 @@ struct SchemaTests { ) } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeWithArray(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_FlashLite, - generationConfig: 
generationConfig, - safetySettings: safetySettings - ) - let prompt = "What are the biggest cities in Canada?" + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithArray(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_FlashLite, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = "What are the biggest cities in Canada?" - let response = try await model.generate(CityList.self, from: prompt) + let response = try await model.respond(to: prompt, generating: CityList.self) - let cityList = response.content - #expect( - cityList.cities.count >= 3, - "Expected at least 3 cities, but got \(cityList.cities.count)" - ) - #expect( - cityList.cities.count <= 5, - "Expected at most 5 cities, but got \(cityList.cities.count)" - ) - } + let cityList = response.content + #expect( + cityList.cities.count >= 3, + "Expected at least 3 cities, but got \(cityList.cities.count)" + ) + #expect( + cityList.cities.count <= 5, + "Expected at most 5 cities, but got \(cityList.cities.count)" + ) + } + #endif // compiler(>=6.2) @FirebaseGenerable struct TestNumber { @@ -138,21 +141,24 @@ struct SchemaTests { #expect(testNumber.value <= 120, "Expected a number <= 120, but got \(testNumber.value)") } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeWithNumber(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_FlashLite, - generationConfig: generationConfig, - safetySettings: safetySettings - ) - let prompt = "Give me a number" + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. 
+ #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithNumber(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_FlashLite, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = "Give me a number" - let response = try await model.generate(TestNumber.self, from: prompt) + let response = try await model.respond(to: prompt, generating: TestNumber.self) - let testNumber = response.content - #expect(testNumber.value >= 110, "Expected a number >= 110, but got \(testNumber.value)") - #expect(testNumber.value <= 120, "Expected a number <= 120, but got \(testNumber.value)") - } + let testNumber = response.content + #expect(testNumber.value >= 110, "Expected a number >= 110, but got \(testNumber.value)") + #expect(testNumber.value <= 120, "Expected a number <= 120, but got \(testNumber.value)") + } + #endif // compiler(>=6.2) @FirebaseGenerable struct ProductInfo { @@ -217,28 +223,31 @@ struct SchemaTests { #expect(rating <= 5, "Expected a rating <= 5, but got \(rating)") } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeWithMultipleDataTypes(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_FlashLite, - generationConfig: generationConfig, - safetySettings: safetySettings - ) - let prompt = "Describe a premium wireless headphone, including a user rating and price." 
- - let response = try await model.generate(ProductInfo.self, from: prompt) - - let productInfo = response.content - let price = productInfo.price - let salePrice = productInfo.salePrice - let rating = productInfo.rating - #expect(price >= 10.0, "Expected a price >= 10.00, but got \(price)") - #expect(price <= 120.0, "Expected a price <= 120.00, but got \(price)") - #expect(salePrice >= 5.0, "Expected a salePrice >= 5.00, but got \(salePrice)") - #expect(salePrice <= 90.0, "Expected a salePrice <= 90.00, but got \(salePrice)") - #expect(rating >= 1, "Expected a rating >= 1, but got \(rating)") - #expect(rating <= 5, "Expected a rating <= 5, but got \(rating)") - } + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithMultipleDataTypes(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_FlashLite, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = "Describe a premium wireless headphone, including a user rating and price." 
+ + let response = try await model.respond(to: prompt, generating: ProductInfo.self) + + let productInfo = response.content + let price = productInfo.price + let salePrice = productInfo.salePrice + let rating = productInfo.rating + #expect(price >= 10.0, "Expected a price >= 10.00, but got \(price)") + #expect(price <= 120.0, "Expected a price <= 120.00, but got \(price)") + #expect(salePrice >= 5.0, "Expected a salePrice >= 5.00, but got \(salePrice)") + #expect(salePrice <= 90.0, "Expected a salePrice <= 90.00, but got \(salePrice)") + #expect(rating >= 1, "Expected a rating >= 1, but got \(rating)") + #expect(rating <= 5, "Expected a rating <= 5, but got \(rating)") + } + #endif // compiler(>=6.2) @FirebaseGenerable struct MailingAddress { @@ -344,49 +353,52 @@ struct SchemaTests { } } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeAnyOf(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_Flash, - generationConfig: generationConfig, - safetySettings: safetySettings - ) - let prompt = """ - What are the mailing addresses for the University of Waterloo, UC Berkeley and Queen's U? - """ + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithAnyOfArray(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_Flash, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = """ + What are the mailing addresses for the University of Waterloo, UC Berkeley and Queen's U? 
+ """ - let response = try await model.generate([MailingAddress].self, from: prompt) + let response = try await model.respond(to: prompt, generating: [MailingAddress].self) - let mailingAddresses = response.content - try #require( - mailingAddresses.count == 3, - "Expected 3 JSON addresses, got \(mailingAddresses.count)." - ) - let waterlooAddress = mailingAddresses[0] - #expect(waterlooAddress.city == "Waterloo") - if case let .canada(province, postalCode) = waterlooAddress.postalInfo { - #expect(province == "ON") - #expect(postalCode == "N2L 3G1") - } else { - Issue.record("Expected Canadian University of Waterloo address, got \(waterlooAddress).") - } - let berkeleyAddress = mailingAddresses[1] - #expect(berkeleyAddress.city == "Berkeley") - if case let .unitedStates(state, zipCode) = berkeleyAddress.postalInfo { - #expect(state == "CA") - #expect(zipCode == "94720") - } else { - Issue.record("Expected American UC Berkeley address, got \(berkeleyAddress).") - } - let queensAddress = mailingAddresses[2] - #expect(queensAddress.city == "Kingston") - if case let .canada(province, postalCode) = queensAddress.postalInfo { - #expect(province == "ON") - #expect(postalCode == "K7L 3N6") - } else { - Issue.record("Expected Canadian Queen's University address, got \(queensAddress).") + let mailingAddresses = response.content + try #require( + mailingAddresses.count == 3, + "Expected 3 JSON addresses, got \(mailingAddresses.count)." 
+ ) + let waterlooAddress = mailingAddresses[0] + #expect(waterlooAddress.city == "Waterloo") + if case let .canada(province, postalCode) = waterlooAddress.postalInfo { + #expect(province == "ON") + #expect(postalCode == "N2L 3G1") + } else { + Issue.record("Expected Canadian University of Waterloo address, got \(waterlooAddress).") + } + let berkeleyAddress = mailingAddresses[1] + #expect(berkeleyAddress.city == "Berkeley") + if case let .unitedStates(state, zipCode) = berkeleyAddress.postalInfo { + #expect(state == "CA") + #expect(zipCode == "94720") + } else { + Issue.record("Expected American UC Berkeley address, got \(berkeleyAddress).") + } + let queensAddress = mailingAddresses[2] + #expect(queensAddress.city == "Kingston") + if case let .canada(province, postalCode) = queensAddress.postalInfo { + #expect(province == "ON") + #expect(postalCode == "K7L 3N6") + } else { + Issue.record("Expected Canadian Queen's University address, got \(queensAddress).") + } } - } + #endif // compiler(>=6.2) @FirebaseGenerable struct FeatureToggle { @@ -420,20 +432,23 @@ struct SchemaTests { #expect(featureToggle.isEnabled) } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeBoolean(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_FlashLite, - generationConfig: generationConfig, - safetySettings: safetySettings - ) - let prompt = "Should the experimental feature be active? Answer yes." + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithBoolean(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_FlashLite, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = "Should the experimental feature be active? Answer yes." 
- let response = try await model.generate(FeatureToggle.self, from: prompt) + let response = try await model.respond(to: prompt, generating: FeatureToggle.self) - let featureToggle = response.content - #expect(featureToggle.isEnabled) - } + let featureToggle = response.content + #expect(featureToggle.isEnabled) + } + #endif // compiler(>=6.2) @FirebaseGenerable struct UserProfile { @@ -471,21 +486,24 @@ struct SchemaTests { #expect(userProfile.middleName == nil) } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeOptional(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_FlashLite, - generationConfig: generationConfig, - safetySettings: safetySettings - ) - let prompt = "Create a user profile for 'jdoe' without a middle name." + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithUserProfile(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_FlashLite, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = "Create a user profile for 'jdoe' without a middle name." 
- let response = try await model.generate(UserProfile.self, from: prompt) + let response = try await model.respond(to: prompt, generating: UserProfile.self) - let userProfile = response.content - #expect(userProfile.username == "jdoe") - #expect(userProfile.middleName == nil) - } + let userProfile = response.content + #expect(userProfile.username == "jdoe") + #expect(userProfile.middleName == nil) + } + #endif // compiler(>=6.2) @FirebaseGenerable struct Pet { @@ -531,21 +549,24 @@ struct SchemaTests { #expect(pet.species == .cat) } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeSimpleStringEnum(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_FlashLite, - generationConfig: generationConfig, - safetySettings: safetySettings - ) - let prompt = "Create a pet dog named 'Buddy'." + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithSimpleStringEnum(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_FlashLite, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = "Create a pet dog named 'Buddy'." 
- let response = try await model.generate(Pet.self, from: prompt) + let response = try await model.respond(to: prompt, generating: Pet.self) - let pet = response.content - #expect(pet.name == "Buddy") - #expect(pet.species == .dog) - } + let pet = response.content + #expect(pet.name == "Buddy") + #expect(pet.species == .dog) + } + #endif // compiler(>=6.2) @FirebaseGenerable struct Task { @@ -594,21 +615,24 @@ struct SchemaTests { #expect(task.priority == .medium) } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeStringRawValueEnum(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_FlashLite, - generationConfig: generationConfig, - safetySettings: safetySettings - ) - let prompt = "Create a high priority task titled 'Fix Bug'." + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithStringRawValueEnum(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_FlashLite, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = "Create a high priority task titled 'Fix Bug'." 
- let response = try await model.generate(Task.self, from: prompt) + let response = try await model.respond(to: prompt, generating: Task.self) - let task = response.content - #expect(task.title == "Fix Bug") - #expect(task.priority == .high) - } + let task = response.content + #expect(task.title == "Fix Bug") + #expect(task.priority == .high) + } + #endif // compiler(>=6.2) @FirebaseGenerable struct GradeBook { @@ -649,23 +673,26 @@ struct SchemaTests { } } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeArrayConstraints(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_FlashLite, - generationConfig: generationConfig, - safetySettings: safetySettings - ) - let prompt = "Generate a gradebook with scores 95, 80, and 100." + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithConstrainedArray(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_FlashLite, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = "Generate a gradebook with scores 95, 80, and 100." 
- let response = try await model.generate(GradeBook.self, from: prompt) + let response = try await model.respond(to: prompt, generating: GradeBook.self) - let gradeBook = response.content - #expect(gradeBook.scores.count == 3) - for score in gradeBook.scores { - #expect(score >= 0 && score <= 100) + let gradeBook = response.content + #expect(gradeBook.scores.count == 3) + for score in gradeBook.scores { + #expect(score >= 0 && score <= 100) + } } - } + #endif // compiler(>=6.2) @FirebaseGenerable struct Catalog { @@ -731,27 +758,30 @@ struct SchemaTests { #expect(catalog.categories[0].items[0].price == 999.99) } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeNesting(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_FlashLite, - generationConfig: generationConfig, - safetySettings: safetySettings - ) - let prompt = """ - Create a catalog named 'Tech' with a category 'Computers' containing an item 'Laptop' for 999.99. - """ - - let response = try await model.generate(Catalog.self, from: prompt) - - let catalog = response.content - #expect(catalog.name == "Tech") - #expect(catalog.categories.count == 1) - #expect(catalog.categories[0].title == "Computers") - #expect(catalog.categories[0].items.count == 1) - #expect(catalog.categories[0].items[0].name == "Laptop") - #expect(catalog.categories[0].items[0].price == 999.99) - } + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithNestedType(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_FlashLite, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = """ + Create a catalog named 'Tech' with a category 'Computers' containing an item 'Laptop' for 999.99. 
+ """ + + let response = try await model.respond(to: prompt, generating: Catalog.self) + + let catalog = response.content + #expect(catalog.name == "Tech") + #expect(catalog.categories.count == 1) + #expect(catalog.categories[0].title == "Computers") + #expect(catalog.categories[0].items.count == 1) + #expect(catalog.categories[0].items[0].name == "Laptop") + #expect(catalog.categories[0].items[0].price == 999.99) + } + #endif // compiler(>=6.2) @FirebaseGenerable struct Statement { @@ -785,20 +815,23 @@ struct SchemaTests { #expect(statement.balance == Decimal(string: "123.45")!) } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeDecimal(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_FlashLite, - generationConfig: generationConfig, - safetySettings: safetySettings - ) - let prompt = "Generate a statement with balance 123.45." + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithDecimalType(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_FlashLite, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = "Generate a statement with balance 123.45." - let response = try await model.generate(Statement.self, from: prompt) + let response = try await model.respond(to: prompt, generating: Statement.self) - let statement = response.content - #expect(statement.balance == Decimal(string: "123.45")!) - } + let statement = response.content + #expect(statement.balance == Decimal(string: "123.45")!) 
+ } + #endif // compiler(>=6.2) @FirebaseGenerable struct Metadata { @@ -837,20 +870,23 @@ struct SchemaTests { #expect(metadata.tags.isEmpty) } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeEmptyCollection(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_FlashLite, - generationConfig: generationConfig, - safetySettings: safetySettings - ) - let prompt = "Generate metadata with no tags." + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. + #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithEmptyCollection(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_FlashLite, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = "Generate metadata with no tags." - let response = try await model.generate(Metadata.self, from: prompt) + let response = try await model.respond(to: prompt, generating: Metadata.self) - let metadata = response.content - #expect(metadata.tags.isEmpty) - } + let metadata = response.content + #expect(metadata.tags.isEmpty) + } + #endif // compiler(>=6.2) @FirebaseGenerable struct ConstrainedValue { @@ -884,20 +920,23 @@ struct SchemaTests { #expect(constrainedValue.value == 15) } - @Test(arguments: InstanceConfig.allConfigs) - func generateTypeCombinedGuides(_ config: InstanceConfig) async throws { - let model = FirebaseAI.componentInstance(config).generativeModel( - modelName: ModelNames.gemini2_5_FlashLite, - generationConfig: generationConfig, - safetySettings: safetySettings - ) - let prompt = "Give me the value 15." + // TODO: Remove the `#if compiler(>=6.2)` when Xcode 26 is the minimum supported version. 
+ #if compiler(>=6.2) + @Test(arguments: InstanceConfig.allConfigs) + func respondWithTypeCombinedGuides(_ config: InstanceConfig) async throws { + let model = FirebaseAI.componentInstance(config).generativeModel( + modelName: ModelNames.gemini2_5_FlashLite, + generationConfig: generationConfig, + safetySettings: safetySettings + ) + let prompt = "Give me the value 15." - let response = try await model.generate(ConstrainedValue.self, from: prompt) + let response = try await model.respond(to: prompt, generating: ConstrainedValue.self) - let constrainedValue = response.content - #expect(constrainedValue.value == 15) - } + let constrainedValue = response.content + #expect(constrainedValue.value == 15) + } + #endif // compiler(>=6.2) @Test(arguments: testConfigs( instanceConfigs: InstanceConfig.allConfigs,