|  | 
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import FirebaseAI
import FirebaseCore
import XCTest

// These snippet tests are intentionally skipped in CI jobs; see the README file in this directory
// for instructions on running them manually.
|  | 21 | + | 
@available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
@available(watchOS, unavailable)
/// Snippets demonstrating the Firebase AI Live API; each method mirrors a snippet published on
/// devsite. These are run manually, not in CI (see the README in this directory).
final class LiveSnippets: XCTestCase {
  /// Configures the default Firebase app before each snippet runs.
  override func setUpWithError() throws {
    try FirebaseApp.configureDefaultAppForSnippets()
  }

  /// Deletes the default Firebase app after each snippet runs.
  override func tearDown() async throws {
    await FirebaseApp.deleteDefaultAppForSnippets()
  }

  /// Snippet: send PCM audio to the model and play back the audio it returns.
  func sendAudioReceiveAudio() async throws {
    // Initialize the Vertex AI Gemini API backend service
    // Set the location to `us-central1` (the flash-live model is only supported in that location)
    // Create a `LiveGenerativeModel` instance with the flash-live model (only model that supports
    // the Live API)
    let model = FirebaseAI.firebaseAI(backend: .vertexAI(location: "us-central1")).liveModel(
      modelName: "gemini-2.0-flash-exp",
      // Configure the model to respond with audio
      generationConfig: LiveGenerationConfig(
        responseModalities: [.audio]
      )
    )

    do {
      let session = try await model.connect()

      // Load the audio file, or tap a microphone
      guard let audioFile = NSDataAsset(name: "audio.pcm") else {
        fatalError("Failed to load audio file")
      }

      // Provide the audio data
      await session.sendAudioRealtime(audioFile.data)

      for try await message in session.responses {
        if case let .content(content) = message.payload {
          content.modelTurn?.parts.forEach { part in
            if let part = part as? InlineDataPart, part.mimeType.starts(with: "audio/pcm") {
              // Handle 16bit pcm audio data at 24khz
              playAudio(part.data)
            }
          }
          // Optional: if you don't require to send more requests.
          if content.isTurnComplete {
            await session.close()
          }
        }
      }
    } catch {
      fatalError(error.localizedDescription)
    }
  }

  /// Snippet: send PCM audio to the model and print the text it returns.
  func sendAudioReceiveText() async throws {
    // Initialize the Vertex AI Gemini API backend service
    // Set the location to `us-central1` (the flash-live model is only supported in that location)
    // Create a `LiveGenerativeModel` instance with the flash-live model (only model that supports
    // the Live API)
    let model = FirebaseAI.firebaseAI(backend: .googleAI()).liveModel(
      modelName: "gemini-live-2.5-flash-preview",
      // Configure the model to respond with text
      generationConfig: LiveGenerationConfig(
        responseModalities: [.text]
      )
    )

    do {
      let session = try await model.connect()

      // Load the audio file, or tap a microphone
      guard let audioFile = NSDataAsset(name: "audio.pcm") else {
        fatalError("Failed to load audio file")
      }

      // Provide the audio data
      await session.sendAudioRealtime(audioFile.data)

      var outputText = ""
      for try await message in session.responses {
        if case let .content(content) = message.payload {
          content.modelTurn?.parts.forEach { part in
            if let part = part as? TextPart {
              outputText += part.text
            }
          }
          // Optional: if you don't require to send more requests.
          if content.isTurnComplete {
            await session.close()
          }
        }
      }

      // Output received from the server.
      print(outputText)
    } catch {
      fatalError(error.localizedDescription)
    }
  }

  /// Snippet: send a text prompt to the model and play back the audio it returns.
  func sendTextReceiveAudio() async throws {
    // Initialize the Gemini Developer API backend service
    // Create a `LiveModel` instance with the flash-live model (only model that supports the Live
    // API)
    let model = FirebaseAI.firebaseAI(backend: .googleAI()).liveModel(
      modelName: "gemini-live-2.5-flash-preview",
      // Configure the model to respond with audio
      generationConfig: LiveGenerationConfig(
        responseModalities: [.audio]
      )
    )

    do {
      let session = try await model.connect()

      // Provide a text prompt
      let text = "tell a short story"

      await session.sendTextRealtime(text)

      for try await message in session.responses {
        if case let .content(content) = message.payload {
          content.modelTurn?.parts.forEach { part in
            if let part = part as? InlineDataPart, part.mimeType.starts(with: "audio/pcm") {
              // Handle 16bit pcm audio data at 24khz
              playAudio(part.data)
            }
          }
          // Optional: if you don't require to send more requests.
          if content.isTurnComplete {
            await session.close()
          }
        }
      }
    } catch {
      fatalError(error.localizedDescription)
    }
  }

  /// Snippet: send a text prompt to the model and print the text it returns.
  ///
  /// Fixed from the original, which was a copy of `sendTextReceiveAudio`: it requested the
  /// `.audio` response modality and handled `InlineDataPart` audio despite this snippet's
  /// receive-text intent. It now requests `.text` and accumulates `TextPart` output, matching
  /// `sendAudioReceiveText`.
  func sendTextReceiveText() async throws {
    // Initialize the Gemini Developer API backend service
    // Create a `LiveModel` instance with the flash-live model (only model that supports the Live
    // API)
    let model = FirebaseAI.firebaseAI(backend: .googleAI()).liveModel(
      modelName: "gemini-live-2.5-flash-preview",
      // Configure the model to respond with text
      generationConfig: LiveGenerationConfig(
        responseModalities: [.text]
      )
    )

    do {
      let session = try await model.connect()

      // Provide a text prompt
      let text = "tell a short story"

      await session.sendTextRealtime(text)

      var outputText = ""
      for try await message in session.responses {
        if case let .content(content) = message.payload {
          content.modelTurn?.parts.forEach { part in
            if let part = part as? TextPart {
              outputText += part.text
            }
          }
          // Optional: if you don't require to send more requests.
          if content.isTurnComplete {
            await session.close()
          }
        }
      }

      // Output received from the server.
      print(outputText)
    } catch {
      fatalError(error.localizedDescription)
    }
  }

  /// Snippet: configure a specific voice for the model's audio responses.
  func changeVoiceAndLanguage() {
    let model = FirebaseAI.firebaseAI(backend: .googleAI()).liveModel(
      modelName: "gemini-live-2.5-flash-preview",
      // Configure the model to use a specific voice for its audio response
      generationConfig: LiveGenerationConfig(
        responseModalities: [.audio],
        speech: SpeechConfig(voiceName: "Fenrir")
      )
    )

    // Not part of snippet
    silenceWarning(model)
  }

  /// Snippet: set generation parameters via `LiveGenerationConfig`.
  func modelParameters() {
    // ...

    // Set parameter values in a `LiveGenerationConfig` (example values shown here)
    let config = LiveGenerationConfig(
      temperature: 0.9,
      topP: 0.1,
      topK: 16,
      maxOutputTokens: 200,
      responseModalities: [.audio],
      speech: SpeechConfig(voiceName: "Fenrir")
    )

    // Initialize the Gemini Developer API backend service
    // Specify the config as part of creating the `LiveGenerativeModel` instance
    let model = FirebaseAI.firebaseAI(backend: .googleAI()).liveModel(
      modelName: "gemini-live-2.5-flash-preview",
      generationConfig: config
    )

    // ...

    // Not part of snippet
    silenceWarning(model)
  }

  /// Snippet: provide system instructions when creating the model.
  func systemInstructions() {
    // Specify the system instructions as part of creating the `LiveGenerativeModel` instance
    let model = FirebaseAI.firebaseAI(backend: .googleAI()).liveModel(
      modelName: "gemini-live-2.5-flash-preview",
      systemInstruction: ModelContent(role: "system", parts: "You are a cat. Your name is Neko.")
    )

    // Not part of snippet
    silenceWarning(model)
  }

  /// Placeholder for snippet readers; a real app would play the PCM data back.
  private func playAudio(_ data: Data) {
    // Use AVAudioPlayerNode or something akin to play back audio
  }

  /// This function only exists to silence the "unused value" warnings.
  ///
  /// This allows us to ensure the snippets match devsite.
  private func silenceWarning(_ model: LiveGenerativeModel) {}
}
0 commit comments