import os
import json

import boto3
import botocore
from dotenv import load_dotenv
from langtrace_python_sdk import langtrace

# Pull AWS credentials / langtrace settings from a local .env file,
# then start langtrace instrumentation (console span export disabled).
load_dotenv()
langtrace.init(write_spans_to_console=False)

# brt: Bedrock runtime client (model invocation).
# brc: Bedrock control-plane client (model listing).
brt = boto3.client("bedrock-runtime", region_name="us-east-1")
brc = boto3.client("bedrock", region_name="us-east-1")

13+
def use_converse_stream():
    """Stream a one-turn chat reply from Claude 3 Haiku via the Converse API.

    The streaming call returns an event stream that must be iterated to
    produce any output; printing the raw response object only shows the
    wrapper. Text deltas are printed as they arrive. On failure, prints
    an error and exits the process with status 1.
    """
    model_id = "anthropic.claude-3-haiku-20240307-v1:0"
    conversation = [
        {
            "role": "user",
            "content": [{"text": "what is the capital of France?"}],
        }
    ]

    try:
        response = brt.converse_stream(
            modelId=model_id,
            messages=conversation,
            inferenceConfig={"maxTokens": 4096, "temperature": 0},
            additionalModelRequestFields={"top_k": 250},
        )
        # Consume the event stream: text arrives via contentBlockDelta events.
        for event in response["stream"]:
            delta = event.get("contentBlockDelta", {}).get("delta", {})
            if "text" in delta:
                print(delta["text"], end="")
        print()

    except Exception as e:
        print(f"ERROR: Can't invoke '{model_id}'. Reason: {e}")
        exit(1)
def use_converse():
    """Ask Claude 3 Haiku one question through the Bedrock Converse API.

    Prints the first text block of the model's reply. On any failure,
    prints an error message and exits the process with status 1.
    """
    model_id = "anthropic.claude-3-haiku-20240307-v1:0"
    user_turn = {
        "role": "user",
        "content": [{"text": "what is the capital of France? "}],
    }

    try:
        result = brt.converse(
            modelId=model_id,
            messages=[user_turn],
            inferenceConfig={"maxTokens": 4096, "temperature": 0},
            additionalModelRequestFields={"top_k": 250},
        )
    except Exception as e:
        print(f"ERROR: Can't invoke '{model_id}'. Reason: {e}")
        exit(1)
    else:
        first_block = result["output"]["message"]["content"][0]
        print(first_block["text"])
60+
61+
def get_foundation_models():
    """Print the model ID of every foundation model visible to this account."""
    summaries = brc.list_foundation_models()["modelSummaries"]
    for summary in summaries:
        print(summary["modelId"])
66+
# Invoke Model API
# Amazon Titan Models
def use_invoke_model_titan(stream=False):
    """Invoke Amazon Titan Text Express through the low-level InvokeModel API.

    Args:
        stream: when True, use invoke_model_with_response_stream and print
            each decoded chunk as it arrives; otherwise make a blocking
            call and print the generated text.

    AccessDeniedException is reported with troubleshooting links; any
    other botocore ClientError is re-raised.
    """
    try:
        prompt_data = "what's the capital of France?"
        body = json.dumps(
            {
                "inputText": prompt_data,
                "textGenerationConfig": {
                    "maxTokenCount": 1024,
                    "topP": 0.95,
                    "temperature": 0.2,
                },
            }
        )
        modelId = "amazon.titan-text-express-v1"  # "amazon.titan-tg1-large"
        accept = "application/json"
        contentType = "application/json"

        if stream:
            response = brt.invoke_model_with_response_stream(
                body=body, modelId=modelId, accept=accept, contentType=contentType
            )
            # A streaming body is an event stream with no .read(); it must
            # be iterated chunk by chunk.
            for event in response.get("body"):
                chunk = event.get("chunk")
                if chunk:
                    print(json.loads(chunk.get("bytes").decode()))
        else:
            response = brt.invoke_model(
                body=body, modelId=modelId, accept=accept, contentType=contentType
            )
            response_body = json.loads(response.get("body").read())
            # Titan returns generated text under results[].outputText.
            for result in response_body.get("results", []):
                print(result.get("outputText"))

    except botocore.exceptions.ClientError as error:
        if error.response["Error"]["Code"] == "AccessDeniedException":
            print(
                f"\x1b[41m{error.response['Error']['Message']}\
                \nTo troubleshoot this issue please refer to the following resources.\
                \nhttps://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_access-denied.html\
                \nhttps://docs.aws.amazon.com/bedrock/latest/userguide/security-iam.html\x1b[0m\n"
            )
        else:
            raise error
109+
110+
# Anthropic Models
def use_invoke_model_anthropic(stream=False):
    """Invoke Claude (anthropic.claude-v2) via InvokeModel with the Messages API.

    Args:
        stream: when True, stream the response and print each decoded
            event chunk; otherwise make a blocking call and print the
            reply text.
    """
    body = json.dumps(
        {
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": 1024,
            "temperature": 0.1,
            "top_p": 0.9,
            "messages": [{"role": "user", "content": "Hello, Claude"}],
        }
    )
    modelId = "anthropic.claude-v2"
    accept = "application/json"
    contentType = "application/json"

    if stream:
        response = brt.invoke_model_with_response_stream(body=body, modelId=modelId)
        stream_response = response.get("body")
        if stream_response:
            for event in stream_response:
                chunk = event.get("chunk")
                if chunk:
                    print(json.loads(chunk.get("bytes").decode()))

    else:
        response = brt.invoke_model(
            body=body, modelId=modelId, accept=accept, contentType=contentType
        )
        response_body = json.loads(response.get("body").read())
        # The Messages API returns text under "content" blocks; the legacy
        # "completion" key used previously here is absent and printed None.
        for block in response_body.get("content", []):
            if block.get("type") == "text":
                print(block.get("text"))
142+
143+
def use_invoke_model_llama():
    """Run a short completion on Meta Llama 3 8B Instruct.

    Returns the parsed JSON response body from InvokeModel.
    """
    model_id = "meta.llama3-8b-instruct-v1:0"

    # Request body for the Llama text-generation schema.
    request = {
        "prompt": "What is the capital of France?",
        "max_gen_len": 128,
        "temperature": 0.1,
        "top_p": 0.9,
    }

    raw = brt.invoke_model(body=json.dumps(request), modelId=model_id)
    return json.loads(raw.get("body").read())
165+
166+
def use_invoke_model_cohere():
    """Ask Cohere Command R+ a question via InvokeModel and print the parsed reply.

    Command R / R+ models take their input under the "message" key — the
    legacy "prompt" key belongs to the older Command models and is
    rejected for Command R+ — and the Bedrock model ID requires its
    version suffix (":0").
    """
    model_id = "cohere.command-r-plus-v1:0"
    body = json.dumps(
        {"message": "What is the capital of France?", "max_tokens": 1024, "temperature": 0.1}
    )
    response = brt.invoke_model(body=body, modelId=model_id)
    response_body = json.loads(response.get("body").read())
    print(response_body)