Skip to content

Commit 7c3fbe6

Browse files
committed
Restore model used in other docs.
1 parent 32d0a18 commit 7c3fbe6

File tree

3 files changed

+108
-0
lines changed

3 files changed

+108
-0
lines changed

.doc_gen/metadata/bedrock-runtime_metadata.yaml

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1483,3 +1483,22 @@ bedrock-runtime_Scenario_GenerateVideos_NovaReel:
14831483
- bedrock-runtime.java2.NovaReel.VideoGeneration
14841484
services:
14851485
bedrock-runtime: {StartAsyncInvoke, GetAsyncInvoke}
bedrock-runtime_InvokeModel_TitanText:
  title: Invoke Amazon Titan Text models on &BR; using the Invoke Model API
  title_abbrev: "InvokeModel"
  synopsis: send a text message to Amazon Titan Text, using the Invoke Model API.
  category: Amazon Titan Text
  languages:
    Python:
      versions:
        - sdk_version: 3
          github: python/example_code/bedrock-runtime
          excerpts:
            - description: Use the Invoke Model API to send a text message.
              snippet_tags:
                - python.example_code.bedrock-runtime.InvokeModel_TitanText
  services:
    bedrock-runtime: {InvokeModel}
Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# snippet-start:[python.example_code.bedrock-runtime.Converse_AmazonTitanText]
# Use the Conversation API to send a text message to Amazon Titan Text.

import sys

import boto3

# Create a Bedrock Runtime client in the AWS Region you want to use.
client = boto3.client("bedrock-runtime", region_name="us-east-1")

# Set the model ID, e.g., Titan Text Premier.
model_id = "amazon.titan-text-premier-v1:0"

# Start a conversation with the user message.
user_message = "Describe the purpose of a 'hello world' program in one line."
conversation = [
    {
        "role": "user",
        "content": [{"text": user_message}],
    }
]

try:
    # Send the message to the model, using a basic inference configuration.
    response = client.converse(
        modelId=model_id,
        messages=conversation,
        inferenceConfig={"maxTokens": 512, "temperature": 0.5, "topP": 0.9},
    )

    # Extract and print the response text.
    response_text = response["output"]["message"]["content"][0]["text"]
    print(response_text)

except Exception as e:
    # Broad catch is deliberate for this demo's top-level boundary: it covers
    # botocore.exceptions.ClientError (service rejections such as a bad model ID
    # or missing model access) as well as parameter-validation and parsing
    # errors. Report the failure and exit with a non-zero status.
    print(f"ERROR: Can't invoke '{model_id}'. Reason: {e}")
    sys.exit(1)

# snippet-end:[python.example_code.bedrock-runtime.Converse_AmazonTitanText]
Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# snippet-start:[python.example_code.bedrock-runtime.InvokeModel_TitanText]
# Use the native inference API to send a text message to Amazon Titan Text.

import json
import sys

import boto3

# Create a Bedrock Runtime client in the AWS Region of your choice.
client = boto3.client("bedrock-runtime", region_name="us-east-1")

# Set the model ID, e.g., Titan Text Premier.
model_id = "amazon.titan-text-premier-v1:0"

# Define the prompt for the model.
prompt = "Describe the purpose of a 'hello world' program in one line."

# Format the request payload using the model's native structure.
native_request = {
    "inputText": prompt,
    "textGenerationConfig": {
        "maxTokenCount": 512,
        "temperature": 0.5,
    },
}

# Convert the native request to JSON.
request = json.dumps(native_request)

try:
    # Invoke the model with the request.
    response = client.invoke_model(modelId=model_id, body=request)

except Exception as e:
    # Broad catch is deliberate for this demo's top-level boundary: it covers
    # botocore.exceptions.ClientError (service rejections) as well as
    # parameter-validation errors raised before the request is sent.
    print(f"ERROR: Can't invoke '{model_id}'. Reason: {e}")
    sys.exit(1)

# Decode the response body.
model_response = json.loads(response["body"].read())

# Extract and print the response text.
response_text = model_response["results"][0]["outputText"]
print(response_text)

# snippet-end:[python.example_code.bedrock-runtime.InvokeModel_TitanText]

0 commit comments

Comments
 (0)