Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
79 changes: 73 additions & 6 deletions ldai/testing/test_tracker.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@ def test_tracks_bedrock_metrics(client: LDClient):

calls = [
call('$ld:ai:generation', context, {'variationKey': 'variation-key', 'configKey': 'config-key'}, 1),
call('$ld:ai:generation:success', context, {'variationKey': 'variation-key', 'configKey': 'config-key'}, 1),
call('$ld:ai:duration:total', context, {'variationKey': 'variation-key', 'configKey': 'config-key'}, 50),
call('$ld:ai:tokens:total', context, {'variationKey': 'variation-key', 'configKey': 'config-key'}, 330),
call('$ld:ai:tokens:input', context, {'variationKey': 'variation-key', 'configKey': 'config-key'}, 220),
Expand All @@ -110,6 +111,39 @@ def test_tracks_bedrock_metrics(client: LDClient):
assert tracker.get_summary().usage == TokenUsage(330, 220, 110)


def test_tracks_bedrock_metrics_with_error(client: LDClient):
    """A Bedrock converse result with a 5xx status should record the error
    event alongside the generic generation, duration, and token metrics,
    and the summary should reflect the failure."""
    context = Context.create('user-key')
    tracker = LDAIConfigTracker(client, "variation-key", "config-key", context)

    tracker.track_bedrock_converse_metrics({
        '$metadata': {'httpStatusCode': 500},
        'usage': {
            'totalTokens': 330,
            'inputTokens': 220,
            'outputTokens': 110,
        },
        'metrics': {
            'latencyMs': 50,
        },
    })

    meta = {'variationKey': 'variation-key', 'configKey': 'config-key'}
    expected = [
        call(event, context, meta, value)
        for event, value in [
            ('$ld:ai:generation', 1),
            ('$ld:ai:generation:error', 1),
            ('$ld:ai:duration:total', 50),
            ('$ld:ai:tokens:total', 330),
            ('$ld:ai:tokens:input', 220),
            ('$ld:ai:tokens:output', 110),
        ]
    ]
    client.track.assert_has_calls(expected)  # type: ignore

    assert tracker.get_summary().success is False
    assert tracker.get_summary().duration == 50
    assert tracker.get_summary().usage == TokenUsage(330, 220, 110)


def test_tracks_openai_metrics(client: LDClient):
context = Context.create('user-key')
tracker = LDAIConfigTracker(client, "variation-key", "config-key", context)
Expand Down Expand Up @@ -166,11 +200,44 @@ def test_tracks_success(client: LDClient):
tracker = LDAIConfigTracker(client, "variation-key", "config-key", context)
tracker.track_success()

client.track.assert_called_with( # type: ignore
'$ld:ai:generation',
context,
{'variationKey': 'variation-key', 'configKey': 'config-key'},
1
)
calls = [
call('$ld:ai:generation', context, {'variationKey': 'variation-key', 'configKey': 'config-key'}, 1),
call('$ld:ai:generation:success', context, {'variationKey': 'variation-key', 'configKey': 'config-key'}, 1),
]

client.track.assert_has_calls(calls) # type: ignore

assert tracker.get_summary().success is True


def test_tracks_error(client: LDClient):
    """track_error() should emit both the generic generation event and the
    error-specific event, and mark the summary unsuccessful."""
    context = Context.create('user-key')
    tracker = LDAIConfigTracker(client, "variation-key", "config-key", context)
    tracker.track_error()

    meta = {'variationKey': 'variation-key', 'configKey': 'config-key'}
    client.track.assert_has_calls([  # type: ignore
        call('$ld:ai:generation', context, meta, 1),
        call('$ld:ai:generation:error', context, meta, 1),
    ])

    assert tracker.get_summary().success is False


def test_error_overwrites_success(client: LDClient):
    """When track_error() follows track_success(), the later error wins:
    all four events are emitted, but the summary ends up unsuccessful."""
    context = Context.create('user-key')
    tracker = LDAIConfigTracker(client, "variation-key", "config-key", context)
    tracker.track_success()
    tracker.track_error()

    meta = {'variationKey': 'variation-key', 'configKey': 'config-key'}
    expected = [
        call(event, context, meta, 1)
        for event in [
            '$ld:ai:generation',
            '$ld:ai:generation:success',
            '$ld:ai:generation',
            '$ld:ai:generation:error',
        ]
    ]
    client.track.assert_has_calls(expected)  # type: ignore

    assert tracker.get_summary().success is False
18 changes: 16 additions & 2 deletions ldai/tracker.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,21 @@ def track_success(self) -> None:
self._ld_client.track(
'$ld:ai:generation', self._context, self.__get_track_data(), 1
)
self._ld_client.track(
'$ld:ai:generation:success', self._context, self.__get_track_data(), 1
)

def track_error(self) -> None:
    """
    Track an unsuccessful AI generation attempt.

    Marks the in-memory summary as unsuccessful, then emits the generic
    generation event followed by the error-specific event.
    """
    self._summary._success = False
    # Emit the generic event first, then the error event, preserving the
    # order callers (and tests) observe on the underlying client.
    for event_name in ('$ld:ai:generation', '$ld:ai:generation:error'):
        self._ld_client.track(
            event_name, self._context, self.__get_track_data(), 1
        )

def track_openai_metrics(self, func):
"""
Expand All @@ -170,8 +185,7 @@ def track_bedrock_converse_metrics(self, res: dict) -> dict:
if status_code == 200:
self.track_success()
elif status_code >= 400:
# Potentially add error tracking in the future.
pass
self.track_error()
if res.get('metrics', {}).get('latencyMs'):
self.track_duration(res['metrics']['latencyMs'])
if res.get('usage'):
Expand Down
Loading