Skip to content

Commit 4a89bf6

Browse files
committed
Support for Amazon Nova model
1 parent 8d66556 commit 4a89bf6

File tree

2 files changed

+36
-9
lines changed

2 files changed

+36
-9
lines changed

aws-opentelemetry-distro/src/amazon/opentelemetry/distro/patches/_bedrock_patches.py

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -268,6 +268,8 @@ def extract_attributes(self, attributes: _AttributeMapT):
268268

269269
if "amazon.titan" in model_id:
270270
self._extract_titan_attributes(attributes, request_body)
271+
if "amazon.nova" in model_id:
272+
self._extract_nova_attributes(attributes, request_body)
271273
elif "anthropic.claude" in model_id:
272274
self._extract_claude_attributes(attributes, request_body)
273275
elif "meta.llama" in model_id:
@@ -288,6 +290,12 @@ def _extract_titan_attributes(self, attributes, request_body):
288290
self._set_if_not_none(attributes, GEN_AI_REQUEST_TOP_P, config.get("topP"))
289291
self._set_if_not_none(attributes, GEN_AI_REQUEST_MAX_TOKENS, config.get("maxTokenCount"))
290292

293+
def _extract_nova_attributes(self, attributes, request_body):
    """Copy Amazon Nova inference parameters from the request body into the attribute map.

    Reads the Nova-native ``inferenceConfig`` object (snake_case keys) and records
    each present value under its GenAI semantic-convention attribute.
    """
    inference_config = request_body.get("inferenceConfig", {})
    # Map each semantic-convention attribute to its Nova config key; values that
    # are absent (None) are skipped by _set_if_not_none.
    for attribute_key, config_key in (
        (GEN_AI_REQUEST_TEMPERATURE, "temperature"),
        (GEN_AI_REQUEST_TOP_P, "top_p"),
        (GEN_AI_REQUEST_MAX_TOKENS, "max_new_tokens"),
    ):
        self._set_if_not_none(attributes, attribute_key, inference_config.get(config_key))
298+
291299
def _extract_claude_attributes(self, attributes, request_body):
292300
self._set_if_not_none(attributes, GEN_AI_REQUEST_MAX_TOKENS, request_body.get("max_tokens"))
293301
self._set_if_not_none(attributes, GEN_AI_REQUEST_TEMPERATURE, request_body.get("temperature"))
@@ -342,6 +350,8 @@ def on_success(self, span: Span, result: Dict[str, Any]):
342350
response_body = json.loads(telemetry_content.decode("utf-8"))
343351
if "amazon.titan" in model_id:
344352
self._handle_amazon_titan_response(span, response_body)
353+
if "amazon.nova" in model_id:
354+
self._handle_amazon_nova_response(span, response_body)
345355
elif "anthropic.claude" in model_id:
346356
self._handle_anthropic_claude_response(span, response_body)
347357
elif "meta.llama" in model_id:
@@ -374,6 +384,14 @@ def _handle_amazon_titan_response(self, span: Span, response_body: Dict[str, Any
374384
span.set_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, result["tokenCount"])
375385
if "completionReason" in result:
376386
span.set_attribute(GEN_AI_RESPONSE_FINISH_REASONS, [result["completionReason"]])
387+
388+
def _handle_amazon_nova_response(self, span: Span, response_body: Dict[str, Any]):
    """Record Amazon Nova token usage and finish reason on the span.

    Each field is only set when its key is present in the response body; the
    finish reason is wrapped in a list per the GenAI semantic conventions.
    """
    # Token-count fields share the same "present key -> set attribute" shape.
    for body_key, attribute_key in (
        ("inputTokenCount", GEN_AI_USAGE_INPUT_TOKENS),
        ("outputTokenCount", GEN_AI_USAGE_OUTPUT_TOKENS),
    ):
        if body_key in response_body:
            span.set_attribute(attribute_key, response_body[body_key])
    if "stopReason" in response_body:
        span.set_attribute(GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stopReason"]])
377395

378396
# pylint: disable=no-self-use
379397
def _handle_anthropic_claude_response(self, span: Span, response_body: Dict[str, Any]):

sample-applications/simple-client-server/server_automatic_s3client.py

Lines changed: 18 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,29 @@
11
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
22
# SPDX-License-Identifier: Apache-2.0
33
import boto3
4-
from flask import Flask, request
5-
6-
# Let's use Amazon S3
7-
s3 = boto3.resource("s3")
4+
import json
5+
from flask import Flask
86

7+
client = boto3.client(service_name="bedrock-runtime")
98
app = Flask(__name__)
109

11-
1210
@app.route("/server_request")
def server_request():
    """Invoke Amazon Nova Lite through the Bedrock Converse API and return the generated text.

    Sends a single-turn user message, prints the full JSON response and the
    extracted text for demonstration, and returns the text as the HTTP body.
    """
    messages = [
        {"role": "user", "content": [{"text": "Write a short poem"}]},
    ]

    model_response = client.converse(
        modelId="us.amazon.nova-lite-v1:0",
        messages=messages,
    )

    print("\n[Full Response]")
    print(json.dumps(model_response, indent=2))

    output_text = model_response["output"]["message"]["content"][0]["text"]
    print("\n[Response Content Text]")
    print(output_text)

    # Bug fix: the view previously fell off the end and returned None, which
    # Flask rejects at runtime ("view function did not return a valid response").
    # Return the generated text so the route produces a 200 response.
    return output_text
26+
1827

1928

2029
if __name__ == "__main__":

0 commit comments

Comments
 (0)