Skip to content

Commit a503622

Browse files
committed
fix(vertexai): make token-based metrics non-nullable
If missing, they'll default to an empty list.
1 parent b1bd93f commit a503622

File tree

3 files changed

+65
-23
lines changed

3 files changed

+65
-23
lines changed

packages/firebase_vertexai/firebase_vertexai/lib/src/api.dart

Lines changed: 14 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,8 @@ import 'schema.dart';
1919
/// Response for Count Tokens
2020
final class CountTokensResponse {
2121
/// Constructor
22-
CountTokensResponse(this.totalTokens,
23-
{this.totalBillableCharacters, this.promptTokensDetails});
22+
CountTokensResponse(
23+
this.totalTokens, this.totalBillableCharacters, this.promptTokensDetails);
2424

2525
/// The number of tokens that the `model` tokenizes the `prompt` into.
2626
///
@@ -33,7 +33,7 @@ final class CountTokensResponse {
3333
final int? totalBillableCharacters;
3434

3535
/// List of modalities that were processed in the request input.
36-
final List<ModalityTokenCount>? promptTokensDetails;
36+
final List<ModalityTokenCount> promptTokensDetails;
3737
}
3838

3939
/// Response from the model; supports multiple candidates.
@@ -133,11 +133,11 @@ final class PromptFeedback {
133133
final class UsageMetadata {
134134
/// Constructor
135135
UsageMetadata._(
136-
{this.promptTokenCount,
136+
this.promptTokenCount,
137137
this.candidatesTokenCount,
138138
this.totalTokenCount,
139139
this.promptTokensDetails,
140-
this.candidatesTokensDetails});
140+
this.candidatesTokensDetails);
141141

142142
/// Number of tokens in the prompt.
143143
final int? promptTokenCount;
@@ -149,10 +149,10 @@ final class UsageMetadata {
149149
final int? totalTokenCount;
150150

151151
/// List of modalities that were processed in the request input.
152-
final List<ModalityTokenCount>? promptTokensDetails;
152+
final List<ModalityTokenCount> promptTokensDetails;
153153

154154
/// List of modalities that were returned in the response.
155-
final List<ModalityTokenCount>? candidatesTokensDetails;
155+
final List<ModalityTokenCount> candidatesTokensDetails;
156156
}
157157

158158
/// Response candidate generated from a [GenerativeModel].
@@ -777,13 +777,13 @@ CountTokensResponse parseCountTokensResponse(Object jsonObject) {
777777
final promptTokensDetails = switch (jsonObject) {
778778
{'promptTokensDetails': final List<Object?> promptTokensDetails} =>
779779
promptTokensDetails.map(_parseModalityTokenCount).toList(),
780-
_ => null,
780+
_ => <ModalityTokenCount>[],
781781
};
782782

783783
return CountTokensResponse(
784784
totalTokens,
785-
totalBillableCharacters: totalBillableCharacters,
786-
promptTokensDetails: promptTokensDetails,
785+
totalBillableCharacters,
786+
promptTokensDetails,
787787
);
788788
}
789789

@@ -859,19 +859,15 @@ UsageMetadata _parseUsageMetadata(Object jsonObject) {
859859
final promptTokensDetails = switch (jsonObject) {
860860
{'promptTokensDetails': final List<Object?> promptTokensDetails} =>
861861
promptTokensDetails.map(_parseModalityTokenCount).toList(),
862-
_ => null,
862+
_ => <ModalityTokenCount>[],
863863
};
864864
final candidatesTokensDetails = switch (jsonObject) {
865865
{'candidatesTokensDetails': final List<Object?> candidatesTokensDetails} =>
866866
candidatesTokensDetails.map(_parseModalityTokenCount).toList(),
867-
_ => null,
867+
_ => <ModalityTokenCount>[],
868868
};
869-
return UsageMetadata._(
870-
promptTokenCount: promptTokenCount,
871-
candidatesTokenCount: candidatesTokenCount,
872-
totalTokenCount: totalTokenCount,
873-
promptTokensDetails: promptTokensDetails,
874-
candidatesTokensDetails: candidatesTokensDetails);
869+
return UsageMetadata._(promptTokenCount, candidatesTokenCount,
870+
totalTokenCount, promptTokensDetails, candidatesTokensDetails);
875871
}
876872

877873
ModalityTokenCount _parseModalityTokenCount(Object? jsonObject) {

packages/firebase_vertexai/firebase_vertexai/test/model_test.dart

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -434,7 +434,8 @@ void main() {
434434
},
435435
response: {'totalTokens': 2},
436436
);
437-
expect(response, matchesCountTokensResponse(CountTokensResponse(2)));
437+
expect(response,
438+
matchesCountTokensResponse(CountTokensResponse(2, null, [])));
438439
});
439440
});
440441
});

packages/firebase_vertexai/firebase_vertexai/test/response_parsing_test.dart

Lines changed: 49 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -656,6 +656,38 @@ void main() {
656656

657657
test('response including usage metadata', () async {
658658
const response = '''
659+
{
660+
"candidates": [{
661+
"content": {
662+
"role": "model",
663+
"parts": [{
664+
"text": "Here is a description of the image:"
665+
}]
666+
},
667+
"finishReason": "STOP"
668+
}],
669+
"usageMetadata": {
670+
"promptTokenCount": 1837,
671+
"candidatesTokenCount": 76,
672+
"totalTokenCount": 1913
673+
}
674+
}
675+
''';
676+
final decoded = jsonDecode(response) as Object;
677+
final generateContentResponse = parseGenerateContentResponse(decoded);
678+
expect(
679+
generateContentResponse.text, 'Here is a description of the image:');
680+
expect(generateContentResponse.usageMetadata?.totalTokenCount, 1913);
681+
expect(generateContentResponse.usageMetadata?.promptTokensDetails.isEmpty,
682+
true);
683+
expect(
684+
generateContentResponse
685+
.usageMetadata?.candidatesTokensDetails.isEmpty,
686+
true);
687+
});
688+
689+
test('response including usage metadata with token details', () async {
690+
const response = '''
659691
{
660692
"candidates": [{
661693
"content": {
@@ -691,22 +723,35 @@ void main() {
691723
expect(generateContentResponse.usageMetadata?.totalTokenCount, 1913);
692724
expect(
693725
generateContentResponse
694-
.usageMetadata?.promptTokensDetails?[1].modality,
726+
.usageMetadata?.promptTokensDetails[1].modality,
695727
ContentModality.image);
696728
expect(
697729
generateContentResponse
698-
.usageMetadata?.promptTokensDetails?[1].tokenCount,
730+
.usageMetadata?.promptTokensDetails[1].tokenCount,
699731
1806);
700732
expect(
701733
generateContentResponse
702-
.usageMetadata?.candidatesTokensDetails?.first.modality,
734+
.usageMetadata?.candidatesTokensDetails.first.modality,
703735
ContentModality.text);
704736
expect(
705737
generateContentResponse
706-
.usageMetadata?.candidatesTokensDetails?.first.tokenCount,
738+
.usageMetadata?.candidatesTokensDetails.first.tokenCount,
707739
76);
708740
});
709741

742+
test('countTokens simple response', () async {
743+
const response = '''
744+
{
745+
"totalTokens": 1837,
746+
"totalBillableCharacters": 117
747+
}
748+
''';
749+
final decoded = jsonDecode(response) as Object;
750+
final countTokensResponse = parseCountTokensResponse(decoded);
751+
expect(countTokensResponse.totalTokens, 1837);
752+
expect(countTokensResponse.promptTokensDetails.isEmpty, true);
753+
});
754+
710755
test('countTokens with modality fields returned', () async {
711756
const response = '''
712757
{

0 commit comments

Comments
 (0)