Skip to content

Commit 93c1588

Browse files
authored
feat: Support Gen AI attributes for Amazon Nova foundational model (#300)
*Description of changes:* Added GenAI inference parameters auto instrumentation support for Amazon Nova. Contract tests: <img width="924" alt="image" src="https://github.com/user-attachments/assets/5e7544d7-3d37-432f-a1d7-25690f12453f" /> <img width="889" alt="image" src="https://github.com/user-attachments/assets/408f4c7a-5d69-41d1-800a-b88f8db7aef0" /> By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice.
1 parent 264493e commit 93c1588

File tree

4 files changed

+294
-193
lines changed

4 files changed

+294
-193
lines changed

aws-opentelemetry-distro/src/amazon/opentelemetry/distro/patches/_bedrock_patches.py

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -268,6 +268,8 @@ def extract_attributes(self, attributes: _AttributeMapT):
268268

269269
if "amazon.titan" in model_id:
270270
self._extract_titan_attributes(attributes, request_body)
271+
if "amazon.nova" in model_id:
272+
self._extract_nova_attributes(attributes, request_body)
271273
elif "anthropic.claude" in model_id:
272274
self._extract_claude_attributes(attributes, request_body)
273275
elif "meta.llama" in model_id:
@@ -288,6 +290,12 @@ def _extract_titan_attributes(self, attributes, request_body):
288290
self._set_if_not_none(attributes, GEN_AI_REQUEST_TOP_P, config.get("topP"))
289291
self._set_if_not_none(attributes, GEN_AI_REQUEST_MAX_TOKENS, config.get("maxTokenCount"))
290292

293+
def _extract_nova_attributes(self, attributes, request_body):
    """Copy Amazon Nova inference parameters from the request body onto the span attributes.

    Nova InvokeModel requests carry their sampling settings under the
    ``inferenceConfig`` key using snake_case names (unlike Titan's
    ``textGenerationConfig``); absent keys are simply skipped.
    """
    inference_config = request_body.get("inferenceConfig", {})
    # (span attribute, Nova request key) pairs — recorded only when present.
    for attribute_key, config_key in (
        (GEN_AI_REQUEST_TEMPERATURE, "temperature"),
        (GEN_AI_REQUEST_TOP_P, "top_p"),
        (GEN_AI_REQUEST_MAX_TOKENS, "max_new_tokens"),
    ):
        self._set_if_not_none(attributes, attribute_key, inference_config.get(config_key))
298+
291299
def _extract_claude_attributes(self, attributes, request_body):
292300
self._set_if_not_none(attributes, GEN_AI_REQUEST_MAX_TOKENS, request_body.get("max_tokens"))
293301
self._set_if_not_none(attributes, GEN_AI_REQUEST_TEMPERATURE, request_body.get("temperature"))
@@ -324,6 +332,7 @@ def _set_if_not_none(attributes, key, value):
324332
if value is not None:
325333
attributes[key] = value
326334

335+
# pylint: disable=too-many-branches
327336
def on_success(self, span: Span, result: Dict[str, Any]):
328337
model_id = self._call_context.params.get(_MODEL_ID)
329338

@@ -342,6 +351,8 @@ def on_success(self, span: Span, result: Dict[str, Any]):
342351
response_body = json.loads(telemetry_content.decode("utf-8"))
343352
if "amazon.titan" in model_id:
344353
self._handle_amazon_titan_response(span, response_body)
354+
if "amazon.nova" in model_id:
355+
self._handle_amazon_nova_response(span, response_body)
345356
elif "anthropic.claude" in model_id:
346357
self._handle_anthropic_claude_response(span, response_body)
347358
elif "meta.llama" in model_id:
@@ -375,6 +386,17 @@ def _handle_amazon_titan_response(self, span: Span, response_body: Dict[str, Any
375386
if "completionReason" in result:
376387
span.set_attribute(GEN_AI_RESPONSE_FINISH_REASONS, [result["completionReason"]])
377388

389+
# pylint: disable=no-self-use
def _handle_amazon_nova_response(self, span: Span, response_body: Dict[str, Any]):
    """Record Nova token usage and finish reason on *span*, when the response provides them.

    Nova responses report consumption under ``usage`` (``inputTokens`` /
    ``outputTokens``) and the termination cause as a top-level ``stopReason``;
    each field is optional and only set on the span if present.
    """
    if "usage" in response_body:
        usage = response_body["usage"]
        # (Nova usage key, span attribute) pairs for token counts.
        for usage_key, attribute_key in (
            ("inputTokens", GEN_AI_USAGE_INPUT_TOKENS),
            ("outputTokens", GEN_AI_USAGE_OUTPUT_TOKENS),
        ):
            if usage_key in usage:
                span.set_attribute(attribute_key, usage[usage_key])
    if "stopReason" in response_body:
        # Finish reasons are conventionally a list, even for a single value.
        span.set_attribute(GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stopReason"]])
399+
378400
# pylint: disable=no-self-use
379401
def _handle_anthropic_claude_response(self, span: Span, response_body: Dict[str, Any]):
380402
if "usage" in response_body:

0 commit comments

Comments (0)