Skip to content

Commit 041abe6

Browse files
committed
fix: get LLM token usage — add handling for an additional result type
1 parent 1d22a1f commit 041abe6

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

src/backend/bisheng/llm/domain/utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -57,15 +57,15 @@ def parse_token_usage(result: Any) -> tuple[int, int, int, int]:
5757
if isinstance(result, ChatResult):
5858
for generation in result.generations:
5959
token_usage = generation.generation_info.get('token_usage', {}) or generation.message.response_metadata.get(
60-
'token_usage', {})
60+
'token_usage', {}) or generation.message.usage_metadata
6161
tmp1, tmp2, tmp3, tmp4 = get_token_from_usage(token_usage)
6262
input_token += tmp1
6363
output_token += tmp2
6464
cache_token += tmp3
6565
total_token += tmp4
6666
elif isinstance(result, ChatGenerationChunk):
6767
token_usage = result.message.response_metadata.get('token_usage', {}) or result.generation_info.get(
68-
'token_usage', {})
68+
'token_usage', {}) or result.message.usage_metadata
6969
input_token, output_token, cache_token, total_token = get_token_from_usage(token_usage)
7070
else:
7171
logger.warning(f'unknown result type: {type(result)}')

0 commit comments

Comments (0)