-
Notifications
You must be signed in to change notification settings - Fork 0
Updates to Python tracking #3
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
f52bf8f
dc073a4
6c0742d
fe50860
bff84ef
1b5e2a5
f68fa4d
6b4f3fc
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,23 +1,5 @@ | ||
from typing import Union | ||
from ldai.types import BedrockTokenUsage, TokenMetrics, TokenUsage, UnderscoreTokenUsage | ||
from ldai.types import BedrockTokenUsage, TokenMetrics, OpenAITokenUsage, UnderscoreTokenUsage | ||
|
||
def usage_to_token_metrics(usage: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> TokenMetrics: | ||
def get_attr(obj, attr, default=0): | ||
if isinstance(obj, dict): | ||
return obj.get(attr, default) | ||
return getattr(obj, attr, default) | ||
|
||
if 'inputTokens' in usage and 'outputTokens' in usage: | ||
# Bedrock usage | ||
return { | ||
'total': get_attr(usage, 'totalTokens'), | ||
'input': get_attr(usage, 'inputTokens'), | ||
'output': get_attr(usage, 'outputTokens'), | ||
} | ||
|
||
# OpenAI usage (both camelCase and snake_case) | ||
return { | ||
'total': get_attr(usage, 'total_tokens', get_attr(usage, 'totalTokens', 0)), | ||
'input': get_attr(usage, 'prompt_tokens', get_attr(usage, 'promptTokens', 0)), | ||
'output': get_attr(usage, 'completion_tokens', get_attr(usage, 'completionTokens', 0)), | ||
} | ||
def usage_to_token_metrics(usage: Union[OpenAITokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> TokenMetrics: | ||
return usage.to_metrics() |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,32 @@ | ||
from enum import Enum | ||
from typing import TypedDict | ||
from typing import Callable, TypedDict | ||
from dataclasses import dataclass | ||
|
||
@dataclass | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I don't typically see both There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. @keelerm84 this is a lack of knowledge on my part. I switched them over to dataclasses in the following commit. |
||
class TokenMetrics(TypedDict): | ||
total: int | ||
input: int | ||
output: int # type: ignore | ||
|
||
class AIConfigData(TypedDict): | ||
config: dict | ||
prompt: any | ||
_ldMeta: dict | ||
|
||
class AITracker(TypedDict): | ||
track_duration: Callable[..., None] | ||
track_tokens: Callable[..., None] | ||
track_error: Callable[..., None] | ||
track_generation: Callable[..., None] | ||
track_feedback: Callable[..., None] | ||
|
||
class AIConfig(): | ||
def __init__(self, config: AIConfigData, tracker: AITracker): | ||
self._config = config | ||
self._tracker = tracker | ||
|
||
config: AIConfigData | ||
tracker: AITracker | ||
InTheCloudDan marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
||
class FeedbackKind(Enum): | ||
Positive = "positive" | ||
|
@@ -10,17 +37,48 @@ class TokenUsage(TypedDict): | |
prompt_tokens: int | ||
completion_tokens: int | ||
|
||
class UnderscoreTokenUsage(TypedDict): | ||
total_tokens: int | ||
prompt_tokens: int | ||
completion_tokens: int | ||
def to_metrics(self): | ||
return { | ||
'total': self['total_tokens'], | ||
'input': self['prompt_tokens'], | ||
'output': self['completion_tokens'], | ||
} | ||
|
||
class BedrockTokenUsage(TypedDict): | ||
totalTokens: int | ||
inputTokens: int | ||
outputTokens: int | ||
class OpenAITokenUsage: | ||
def __init__(self, data: any): | ||
self.total_tokens = data.total_tokens | ||
self.prompt_tokens = data.prompt_tokens | ||
self.completion_tokens = data.completion_tokens | ||
|
||
class TokenMetrics(TypedDict): | ||
total: int | ||
input: int | ||
output: int # type: ignore | ||
def to_metrics(self) -> TokenMetrics: | ||
return { | ||
'total': self.total_tokens, | ||
'input': self.prompt_tokens, | ||
'output': self.completion_tokens, | ||
} | ||
|
||
class UnderscoreTokenUsage: | ||
def __init__(self, data: dict): | ||
self.total_tokens = data.get('total_tokens', 0) | ||
self.prompt_tokens = data.get('prompt_tokens', 0) | ||
self.completion_tokens = data.get('completion_tokens', 0) | ||
|
||
def to_metrics(self) -> TokenMetrics: | ||
return { | ||
'total': self.total_tokens, | ||
'input': self.prompt_tokens, | ||
'output': self.completion_tokens, | ||
} | ||
|
||
class BedrockTokenUsage: | ||
def __init__(self, data: dict): | ||
self.totalTokens = data.get('totalTokens', 0) | ||
self.inputTokens = data.get('inputTokens', 0) | ||
self.outputTokens = data.get('outputTokens', 0) | ||
|
||
def to_metrics(self) -> TokenMetrics: | ||
return { | ||
'total': self.totalTokens, | ||
'input': self.inputTokens, | ||
'output': self.outputTokens, | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
It looks like this is returning a dictionary, not an
AIConfig
type. Also just noticed there is a rogue print in there.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@keelerm84 updated the logic to return an AIConfig