Commit d5b6dec

[Firebase AI] Fix Google AI useLimitedUseAppCheckTokens config (#15423)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
1 parent ff0eac7 commit d5b6dec

2 files changed (+21 -24 lines)


FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift

Lines changed: 19 additions & 23 deletions
@@ -61,20 +61,25 @@ struct LiveSessionTests {
   static let yesOrNo = ModelContent(
     role: "system",
     parts: """
-      You can only respond with "yes" or "no".
+    You can only respond with "yes" or "no".
     """.trimmingCharacters(in: .whitespacesAndNewlines)
   )

   static let helloGoodbye = ModelContent(
     role: "system",
     parts: """
-    When you hear "Hello" say "Goodbye". If you hear anything else, say "The audio file is broken".
+    When you hear "Hello" say "Goodbye". If you hear anything else, say "The audio file is \
+    broken".
     """.trimmingCharacters(in: .whitespacesAndNewlines)
   )

   static let lastNames = ModelContent(
     role: "system",
-    parts: "When you receive a message, if the message is a single word, assume it's the first name of a person, and call the getLastName tool to get the last name of said person. Only respond with the last name."
+    parts: """
+    When you receive a message, if the message is a single word, assume it's the first name of a \
+    person, and call the getLastName tool to get the last name of said person. Only respond with \
+    the last name.
+    """.trimmingCharacters(in: .whitespacesAndNewlines)
   )

   static let animalInVideo = ModelContent(
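Note on the technique used in this hunk: a trailing backslash inside a Swift multi-line string literal joins the next line without inserting a newline, which is how the reformatted prompts stay within the line-length limit while the prompt text remains a single sentence. A minimal standalone sketch (not part of the commit, names illustrative):

let prompt = """
  When you hear "Hello" say "Goodbye". If you hear anything else, say "The audio file is \
  broken".
  """
print(prompt)                 // One line ending in: ...say "The audio file is broken".
print(prompt.contains("\n"))  // false - the backslash suppressed the line break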
@@ -142,10 +147,9 @@ struct LiveSessionTests {

     let session = try await model.connect()

-    guard let audioFile = NSDataAsset(name: "hello") else {
-      Issue.record("Missing audio file 'hello.wav' in Assets")
-      return
-    }
+    let audioFile = try #require(
+      NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
+    )
     await session.sendAudioRealtime(audioFile.data)
     // The model can't infer that we're done speaking until we send null bytes
     await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
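This hunk (and the two similar ones below) replaces the guard-let / Issue.record / return pattern with Swift Testing's #require macro, which unwraps an optional and both records a failure and throws when the value is nil, so the test exits in a single expression. A rough standalone sketch of the same pattern (file path and test name are illustrative, not from the commit):

import Foundation
import Testing

@Test func assetLoads() throws {
  // Records a failure and stops the test here if the optional is nil.
  let data = try #require(FileManager.default.contents(atPath: "hello.wav"),
                          "Missing audio file 'hello.wav'")
  #expect(!data.isEmpty)
}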
@@ -171,10 +175,9 @@ struct LiveSessionTests {

     let session = try await model.connect()

-    guard let audioFile = NSDataAsset(name: "hello") else {
-      Issue.record("Missing audio file 'hello.wav' in Assets")
-      return
-    }
+    let audioFile = try #require(
+      NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
+    )
     await session.sendAudioRealtime(audioFile.data)
     await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))

@@ -281,7 +284,7 @@ struct LiveSessionTests {
   }

   @Test(arguments: arguments.filter {
-    // TODO: (b/450982184) Remove when vertex adds support
+    // TODO: (b/450982184) Remove when Vertex AI adds support for Function IDs and Cancellation
     switch $0.0.apiConfig.service {
     case .googleAI:
       true
@@ -291,12 +294,6 @@ struct LiveSessionTests {
   })
   func realtime_functionCalling_cancellation(_ config: InstanceConfig,
                                              modelName: String) async throws {
-    // TODO: (b/450982184) Remove when vertex adds support
-    guard case .googleAI = config.apiConfig.service else {
-      Issue.record("Vertex does not currently support function ids or function cancellation.")
-      return
-    }
-
     let model = FirebaseAI.componentInstance(config).liveModel(
       modelName: modelName,
       generationConfig: textConfig,
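The removed guard was redundant: the arguments.filter in the preceding hunk already limits this parameterized test to Google AI, so Vertex AI configurations never generate a test case, and an in-body check that records a failure via Issue.record is no longer needed. A simplified, self-contained sketch of that pattern (types and names are hypothetical, not the project's):

import Testing

enum Service { case googleAI, vertexAI }
let allServices: [Service] = [.googleAI, .vertexAI]

@Test(arguments: allServices.filter { service in
  switch service {
  case .googleAI: true
  case .vertexAI: false
  }
})
func functionCancellation(_ service: Service) async throws {
  // Only .googleAI reaches this body; no guard / Issue.record required.
}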
@@ -337,17 +334,16 @@ struct LiveSessionTests {
       generationConfig: audioConfig
     )

-    guard let audioFile = NSDataAsset(name: "hello") else {
-      Issue.record("Missing audio file 'hello.wav' in Assets")
-      return
-    }
+    let audioFile = try #require(
+      NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
+    )

     try await retry(times: 3, delayInSeconds: 2.0) {
       let session = try await model.connect()
       await session.sendAudioRealtime(audioFile.data)
       await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))

-      // wait a second to allow the model to start generating (and cuase a proper interruption)
+      // Wait a second to allow the model to start generating (and cause a proper interruption)
       try await Task.sleep(nanoseconds: oneSecondInNanoseconds)
       await session.sendAudioRealtime(audioFile.data)
       await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
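The interruption test above wraps its body in retry(times:delayInSeconds:), a helper from the test app whose implementation is not part of this diff. A hedged sketch of what such a helper might look like (signature and behavior assumed, not the project's actual code):

import Foundation

/// Runs `body`, retrying up to `times` attempts with a fixed delay between failures.
/// Assumed shape; the test app's real helper may differ.
func retry(times: Int, delayInSeconds: TimeInterval,
           _ body: () async throws -> Void) async throws {
  for attempt in 1 ... times {
    do {
      try await body()
      return
    } catch where attempt < times {
      // Swallow the error and wait before the next attempt; the final failure propagates.
      try await Task.sleep(nanoseconds: UInt64(delayInSeconds * 1_000_000_000))
    }
  }
}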

FirebaseAI/Tests/TestApp/Tests/Utilities/InstanceConfig.swift

Lines changed: 2 additions & 1 deletion
@@ -56,6 +56,7 @@ struct InstanceConfig: Equatable, Encodable {
     apiConfig: APIConfig(service: .googleAI(endpoint: .firebaseProxyProd), version: .v1beta)
   )
   static let googleAI_v1beta_appCheckLimitedUse = InstanceConfig(
+    useLimitedUseAppCheckTokens: true,
     apiConfig: APIConfig(service: .googleAI(endpoint: .firebaseProxyProd), version: .v1beta)
   )
   static let googleAI_v1beta_staging = InstanceConfig(
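This hunk is the fix named in the commit title: googleAI_v1beta_appCheckLimitedUse previously omitted useLimitedUseAppCheckTokens: true, which apparently left it indistinguishable from the plain Google AI v1beta config defined just above it, so the limited-use App Check token path was never exercised. Side by side for illustration (the name googleAI_v1beta for the preceding config is assumed, not shown in the diff):

  // Assumed name for the config whose closing lines appear as context above.
  static let googleAI_v1beta = InstanceConfig(
    apiConfig: APIConfig(service: .googleAI(endpoint: .firebaseProxyProd), version: .v1beta)
  )
  static let googleAI_v1beta_appCheckLimitedUse = InstanceConfig(
    useLimitedUseAppCheckTokens: true, // the flag this commit adds
    apiConfig: APIConfig(service: .googleAI(endpoint: .firebaseProxyProd), version: .v1beta)
  )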
@@ -164,7 +165,7 @@ extension InstanceConfig: CustomTestStringConvertible {
     }
     let locationSuffix: String
     if case let .vertexAI(_, location: location) = apiConfig.service {
-      locationSuffix = location
+      locationSuffix = " - (\(location))"
     } else {
       locationSuffix = ""
     }
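This second hunk only changes how Vertex AI instances are labeled in test output: the location is now wrapped in a " - (...)" suffix rather than being concatenated bare onto the name. A hypothetical rendering, assuming the suffix is appended directly to a service label in testDescription (the real label format may differ):

let location = "us-central1"
let locationSuffix = " - (\(location))"
print("Vertex AI (v1beta)" + locationSuffix) // Vertex AI (v1beta) - (us-central1)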
