3 changes: 3 additions & 0 deletions FirebaseAI/CHANGELOG.md
@@ -4,6 +4,9 @@
types. (#14971)
- [added] Added support for configuring the "thinking" budget when using Gemini
2.5 series models. (#14909)
- [changed] Deprecated `CountTokensResponse.totalBillableCharacters`; use
`totalTokens` instead. Gemini 2.0 series models and newer are always billed by
token count. (#14998)

# 11.13.0
- [feature] Initial release of the Firebase AI Logic SDK (`FirebaseAI`). This
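A minimal sketch of the migration described in the changelog entry above. The backend, model name, and prompt are placeholders, and `FirebaseApp.configure()` is assumed to have already run:

```swift
import FirebaseAI

// Sketch only: model name and prompt are placeholders.
func printTokenUsage() async throws {
  let model = FirebaseAI.firebaseAI().generativeModel(modelName: "gemini-2.0-flash")
  let response = try await model.countTokens("Why is the sky blue?")

  // Deprecated: character counts are only reported by Vertex AI and are no
  // longer how Gemini 2.0 series and newer models are billed.
  // let characters = response.totalBillableCharacters

  // Preferred: token counts are reported by both backends.
  print("Prompt uses \(response.totalTokens) tokens")
}
```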
@@ -39,17 +39,31 @@ extension CountTokensRequest: GenerativeAIRequest {
/// The model's response to a count tokens request.
@available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
public struct CountTokensResponse: Sendable {
/// Container for deprecated properties or methods.
///
/// This workaround allows deprecated fields to be referenced internally (for example, in the
/// `init(from:)` initializer) without introducing compiler warnings.
struct Deprecated {
let totalBillableCharacters: Int?
}

/// The total number of tokens in the input given to the model as a prompt.
public let totalTokens: Int

/// The total number of billable characters in the text input given to the model as a prompt.
///
/// > Important: This does not include billable image, video or other non-text input. See
/// [Vertex AI pricing](https://firebase.google.com/docs/vertex-ai/pricing) for details.
public let totalBillableCharacters: Int?
@available(*, deprecated, message: """
Use `totalTokens` instead; Gemini 2.0 series models and newer are always billed by token count.
""")
public var totalBillableCharacters: Int? { deprecated.totalBillableCharacters }

/// The breakdown, by modality, of how many tokens are consumed by the prompt.
public let promptTokensDetails: [ModalityTokenCount]

/// Deprecated properties or methods.
let deprecated: Deprecated
}
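The nested `Deprecated` container above is what lets the SDK keep populating the old field internally without tripping its own deprecation warnings. A stripped-down sketch of the same pattern, with illustrative type and property names that are not part of the SDK:

```swift
// Illustrative only: a generic "deprecated container" pattern, not SDK code.
struct Response {
  struct Deprecated {
    let legacyCount: Int?
  }

  let currentCount: Int

  // The old name stays on the public surface but warns at external call sites.
  @available(*, deprecated, message: "Use `currentCount` instead.")
  var legacyCount: Int? { deprecated.legacyCount }

  // Internal code (such as a decoding initializer) reads and writes the
  // container, so the library itself still compiles without warnings.
  let deprecated: Deprecated
}

let response = Response(currentCount: 6, deprecated: .init(legacyCount: 16))
// `response.legacyCount` still compiles, but emits a deprecation warning.
```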

// MARK: - Codable Conformances
@@ -105,9 +119,10 @@ extension CountTokensResponse: Decodable {
public init(from decoder: any Decoder) throws {
let container = try decoder.container(keyedBy: CodingKeys.self)
totalTokens = try container.decodeIfPresent(Int.self, forKey: .totalTokens) ?? 0
totalBillableCharacters =
try container.decodeIfPresent(Int.self, forKey: .totalBillableCharacters)
promptTokensDetails =
try container.decodeIfPresent([ModalityTokenCount].self, forKey: .promptTokensDetails) ?? []
let totalBillableCharacters =
try container.decodeIfPresent(Int.self, forKey: .totalBillableCharacters)
deprecated = CountTokensResponse.Deprecated(totalBillableCharacters: totalBillableCharacters)
}
}
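A quick sketch of how the reworked `init(from:)` behaves when the backend omits `totalBillableCharacters`, as the Gemini Developer API does. The JSON payload is hand-written for illustration and assumes the `Decodable` conformance shown in the diff is reachable from the calling module; real responses carry more fields:

```swift
import Foundation
import FirebaseAI

// Hand-written payload mimicking a Developer API response with no
// `totalBillableCharacters` key.
let payload = Data(#"{"totalTokens": 6}"#.utf8)

do {
  let response = try JSONDecoder().decode(CountTokensResponse.self, from: payload)
  // totalTokens == 6, promptTokensDetails falls back to [], and the internal
  // `deprecated` container holds nil for the missing character count.
  print(response.totalTokens)
} catch {
  print("Decoding failed: \(error)")
}
```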
@@ -59,9 +59,9 @@ struct CountTokensIntegrationTests {
#expect(response.totalTokens == 6)
switch config.apiConfig.service {
case .vertexAI:
#expect(response.totalBillableCharacters == 16)
#expect(response.deprecated.totalBillableCharacters == 16)
case .googleAI:
#expect(response.totalBillableCharacters == nil)
#expect(response.deprecated.totalBillableCharacters == nil)
}
#expect(response.promptTokensDetails.count == 1)
let promptTokensDetails = try #require(response.promptTokensDetails.first)
@@ -83,9 +83,9 @@
#expect(response.totalTokens == 14)
switch config.apiConfig.service {
case .vertexAI:
#expect(response.totalBillableCharacters == 61)
#expect(response.deprecated.totalBillableCharacters == 61)
case .googleAI:
#expect(response.totalBillableCharacters == nil)
#expect(response.deprecated.totalBillableCharacters == nil)
}
#expect(response.promptTokensDetails.count == 1)
let promptTokensDetails = try #require(response.promptTokensDetails.first)
@@ -115,12 +115,12 @@
switch config.apiConfig.service {
case .vertexAI:
#expect(response.totalTokens == 65)
#expect(response.totalBillableCharacters == 170)
#expect(response.deprecated.totalBillableCharacters == 170)
case .googleAI:
// The Developer API erroneously ignores the `responseSchema` when counting tokens, resulting
// in a lower total count than Vertex AI.
#expect(response.totalTokens == 34)
#expect(response.totalBillableCharacters == nil)
#expect(response.deprecated.totalBillableCharacters == nil)
}
#expect(response.promptTokensDetails.count == 1)
let promptTokensDetails = try #require(response.promptTokensDetails.first)
@@ -19,6 +19,8 @@ import FirebaseCore
import FirebaseStorage
import XCTest

@testable import struct FirebaseAI.CountTokensRequest

// TODO(#14405): Migrate to Swift Testing and parameterize tests.
final class IntegrationTests: XCTestCase {
// Set temperature, topP and topK to lowest allowed values to make responses more deterministic.
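The tests now read the internal `deprecated.totalBillableCharacters` instead of the deprecated public property, which is only possible through `@testable` imports like the one added above. A hedged sketch of that access pattern, assuming the struct's synthesized memberwise initializer remains internal and therefore reachable with testability enabled:

```swift
import XCTest
@testable import FirebaseAI

final class DeprecatedAccessSketch: XCTestCase {
  func testInternalContainerIsReachable() {
    // Both the memberwise initializer and the `deprecated` container are
    // internal to FirebaseAI; they compile here only because of @testable.
    let response = CountTokensResponse(
      totalTokens: 6,
      promptTokensDetails: [],
      deprecated: .init(totalBillableCharacters: 16)
    )
    XCTAssertEqual(response.deprecated.totalBillableCharacters, 16)
  }
}
```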
@@ -83,7 +85,7 @@ final class IntegrationTests: XCTestCase {
let response = try await model.countTokens(prompt)

XCTAssertEqual(response.totalTokens, 14)
XCTAssertEqual(response.totalBillableCharacters, 51)
XCTAssertEqual(response.deprecated.totalBillableCharacters, 51)
XCTAssertEqual(response.promptTokensDetails.count, 1)
let promptTokensDetails = try XCTUnwrap(response.promptTokensDetails.first)
XCTAssertEqual(promptTokensDetails.modality, .text)
@@ -100,7 +102,7 @@
let response = try await model.countTokens(image)

XCTAssertEqual(response.totalTokens, 266)
XCTAssertEqual(response.totalBillableCharacters, 35)
XCTAssertEqual(response.deprecated.totalBillableCharacters, 35)
XCTAssertEqual(response.promptTokensDetails.count, 2) // Image prompt + system instruction
let textPromptTokensDetails = try XCTUnwrap(response.promptTokensDetails.first {
$0.modality == .text
@@ -120,7 +122,7 @@
let response = try await model.countTokens(fileData)

XCTAssertEqual(response.totalTokens, 266)
XCTAssertEqual(response.totalBillableCharacters, 35)
XCTAssertEqual(response.deprecated.totalBillableCharacters, 35)
XCTAssertEqual(response.promptTokensDetails.count, 2) // Image prompt + system instruction
let textPromptTokensDetails = try XCTUnwrap(response.promptTokensDetails.first {
$0.modality == .text
@@ -139,7 +141,7 @@
let response = try await model.countTokens(fileData)

XCTAssertEqual(response.totalTokens, 266)
XCTAssertEqual(response.totalBillableCharacters, 35)
XCTAssertEqual(response.deprecated.totalBillableCharacters, 35)
}

func testCountTokens_image_fileData_requiresUserAuth_userSignedIn() async throws {
Expand All @@ -150,7 +152,7 @@ final class IntegrationTests: XCTestCase {
let response = try await model.countTokens(fileData)

XCTAssertEqual(response.totalTokens, 266)
XCTAssertEqual(response.totalBillableCharacters, 35)
XCTAssertEqual(response.deprecated.totalBillableCharacters, 35)
}

func testCountTokens_image_fileData_requiresUserAuth_wrongUser_permissionDenied() async throws {
@@ -191,7 +193,7 @@
])

XCTAssertGreaterThan(response.totalTokens, 0)
XCTAssertEqual(response.totalBillableCharacters, 71)
XCTAssertEqual(response.deprecated.totalBillableCharacters, 71)
XCTAssertEqual(response.promptTokensDetails.count, 1)
let promptTokensDetails = try XCTUnwrap(response.promptTokensDetails.first)
XCTAssertEqual(promptTokensDetails.modality, .text)
8 changes: 4 additions & 4 deletions FirebaseAI/Tests/Unit/GenerativeModelVertexAITests.swift
@@ -1517,7 +1517,7 @@ final class GenerativeModelVertexAITests: XCTestCase {
let response = try await model.countTokens("Why is the sky blue?")

XCTAssertEqual(response.totalTokens, 6)
XCTAssertEqual(response.totalBillableCharacters, 16)
XCTAssertEqual(response.deprecated.totalBillableCharacters, 16)
}

func testCountTokens_succeeds_detailed() async throws {
Expand All @@ -1530,7 +1530,7 @@ final class GenerativeModelVertexAITests: XCTestCase {
let response = try await model.countTokens("Why is the sky blue?")

XCTAssertEqual(response.totalTokens, 1837)
XCTAssertEqual(response.totalBillableCharacters, 117)
XCTAssertEqual(response.deprecated.totalBillableCharacters, 117)
XCTAssertEqual(response.promptTokensDetails.count, 2)
XCTAssertEqual(response.promptTokensDetails[0].modality, .image)
XCTAssertEqual(response.promptTokensDetails[0].tokenCount, 1806)
@@ -1577,7 +1577,7 @@
let response = try await model.countTokens("Why is the sky blue?")

XCTAssertEqual(response.totalTokens, 6)
XCTAssertEqual(response.totalBillableCharacters, 16)
XCTAssertEqual(response.deprecated.totalBillableCharacters, 16)
}

func testCountTokens_succeeds_noBillableCharacters() async throws {
Expand All @@ -1590,7 +1590,7 @@ final class GenerativeModelVertexAITests: XCTestCase {
let response = try await model.countTokens(InlineDataPart(data: Data(), mimeType: "image/jpeg"))

XCTAssertEqual(response.totalTokens, 258)
XCTAssertNil(response.totalBillableCharacters)
XCTAssertNil(response.deprecated.totalBillableCharacters)
}

func testCountTokens_modelNotFound() async throws {