Skip to content

Commit 4eacf4a

Browse files
committed
Merge branch 'development' of github.com:Scale3-Labs/langtrace-python-sdk into development
2 parents f217b66 + a5603d0 commit 4eacf4a

File tree

22 files changed

+1013
-142
lines changed

22 files changed

+1013
-142
lines changed
Lines changed: 14 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,20 @@
1-
from examples.awsbedrock_examples.converse import use_converse
1+
from examples.awsbedrock_examples.converse import (
2+
use_invoke_model_anthropic,
3+
init_bedrock_langchain,
4+
use_invoke_model_titan,
5+
use_invoke_model_llama,
6+
)
27
from langtrace_python_sdk import langtrace, with_langtrace_root_span
38

4-
langtrace.init()
5-
69

710
class AWSBedrockRunner:
    """Example entry point that exercises the AWS Bedrock demos under a Langtrace root span."""

    @with_langtrace_root_span("AWS_Bedrock")
    def run(self):
        """Run the currently selected Bedrock example; toggle others via the comments."""
        # use_converse_stream()
        # use_converse()
        # use_invoke_model_anthropic(stream=True)
        # use_invoke_model_cohere()
        # use_invoke_model_llama(stream=False)
        # use_invoke_model_titan(stream=False)
        init_bedrock_langchain()
Lines changed: 179 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,34 +1,199 @@
1-
import json

import boto3
import botocore
from dotenv import load_dotenv
from langchain_aws import ChatBedrock

from langtrace_python_sdk import langtrace

# Load AWS credentials / config from .env, then initialize tracing.
load_dotenv()
langtrace.init(write_spans_to_console=False)

# Shared Bedrock clients: "bedrock-runtime" for inference calls,
# "bedrock" (control plane) for listing foundation models.
brt = boto3.client("bedrock-runtime", region_name="us-east-1")
brc = boto3.client("bedrock", region_name="us-east-1")
def use_converse_stream():
    """Stream a chat completion from Claude 3 Haiku via the Bedrock Converse API.

    Prints the raw streaming response object; exits the process on failure.
    """
    model_id = "anthropic.claude-3-haiku-20240307-v1:0"
    messages = [
        {
            "role": "user",
            "content": [{"text": "what is the capital of France?"}],
        }
    ]

    try:
        result = brt.converse_stream(
            modelId=model_id,
            messages=messages,
            inferenceConfig={"maxTokens": 4096, "temperature": 0},
            additionalModelRequestFields={"top_k": 250},
        )
        # response_text = result["output"]["message"]["content"][0]["text"]
        print(result)
    except Exception as err:
        print(f"ERROR: Can't invoke '{model_id}'. Reason: {err}")
        exit(1)
437

5-
langtrace.init(api_key=os.environ["LANGTRACE_API_KEY"])
638

739
def use_converse():
    """Request a single (non-streaming) chat completion from Claude 3 Haiku.

    Prints the first text block of the model's reply; exits the process on failure.
    """
    model_id = "anthropic.claude-3-haiku-20240307-v1:0"
    chat_history = [
        {
            "role": "user",
            "content": [{"text": "what is the capital of France?"}],
        }
    ]

    try:
        reply = brt.converse(
            modelId=model_id,
            messages=chat_history,
            inferenceConfig={"maxTokens": 4096, "temperature": 0},
            additionalModelRequestFields={"top_k": 250},
        )
        print(reply["output"]["message"]["content"][0]["text"])
    except Exception as err:
        print(f"ERROR: Can't invoke '{model_id}'. Reason: {err}")
        exit(1)
61+
62+
63+
def get_foundation_models():
    """Print the model ID of every foundation model visible to this account/region."""
    summaries = brc.list_foundation_models()["modelSummaries"]
    for summary in summaries:
        print(summary["modelId"])
66+
67+
68+
# Invoke Model API
69+
# Amazon Titan Models
70+
def use_invoke_model_titan(stream=False):
    """Invoke an Amazon Titan text model through the Bedrock InvokeModel API.

    Args:
        stream: When True, use the streaming response API and print output
            text chunks as they arrive; otherwise make a blocking call.

    Returns:
        The parsed JSON response body (dict) for the non-streaming call,
        None for the streaming path.

    Raises:
        botocore.exceptions.ClientError: re-raised for any error other than
            AccessDeniedException, which is reported with remediation links.
    """
    try:
        prompt_data = "what's the capital of France?"
        body = json.dumps(
            {
                "inputText": prompt_data,
                "textGenerationConfig": {
                    "maxTokenCount": 1024,
                    "topP": 0.95,
                    "temperature": 0.2,
                },
            }
        )
        modelId = "amazon.titan-text-express-v1"  # "amazon.titan-tg1-large"
        accept = "application/json"
        contentType = "application/json"

        if stream:
            response = brt.invoke_model_with_response_stream(
                body=body, modelId=modelId, accept=accept, contentType=contentType
            )
            # Extract and print the response text in real-time.
            for event in response["body"]:
                chunk = json.loads(event["chunk"]["bytes"])
                if "outputText" in chunk:
                    print(chunk["outputText"], end="")
        else:
            response = brt.invoke_model(
                body=body, modelId=modelId, accept=accept, contentType=contentType
            )
            # BUG FIX: the parsed body was previously assigned and silently
            # discarded; return it so callers can inspect the model output
            # (matches use_invoke_model_llama's non-stream behavior).
            return json.loads(response.get("body").read())

    except botocore.exceptions.ClientError as error:
        if error.response["Error"]["Code"] == "AccessDeniedException":
            # BUG FIX: corrected the "troubeshoot" typo in the user-facing message.
            print(
                f"\x1b[41m{error.response['Error']['Message']}"
                "\nTo troubleshoot this issue please refer to the following resources."
                "\nhttps://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_access-denied.html"
                "\nhttps://docs.aws.amazon.com/bedrock/latest/userguide/security-iam.html\x1b[0m\n"
            )
        else:
            raise error
116+
117+
118+
# Anthropic Models
119+
def use_invoke_model_anthropic(stream=False):
    """Invoke Anthropic Claude v2 via the Bedrock InvokeModel (Messages) API.

    Args:
        stream: When True, consume the streaming response without printing;
            otherwise print the generated text of the blocking response.
    """
    body = json.dumps(
        {
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": 1024,
            "temperature": 0.1,
            "top_p": 0.9,
            "messages": [{"role": "user", "content": "Hello, Claude"}],
        }
    )
    modelId = "anthropic.claude-v2"
    accept = "application/json"
    contentType = "application/json"

    if stream:
        response = brt.invoke_model_with_response_stream(body=body, modelId=modelId)
        stream_response = response.get("body")
        if stream_response:
            # Drain the stream; chunks are discarded (tracing exercise only).
            for event in stream_response:
                chunk = event.get("chunk")
                if chunk:
                    # print(json.loads(chunk.get("bytes").decode()))
                    pass

    else:
        response = brt.invoke_model(
            body=body, modelId=modelId, accept=accept, contentType=contentType
        )
        response_body = json.loads(response.get("body").read())
        # BUG FIX: the Messages API (anthropic_version bedrock-2023-05-31)
        # returns generated text under "content" (a list of typed blocks),
        # not the legacy Text Completions key "completion", which is absent
        # here and previously printed None.
        for block in response_body.get("content", []):
            if block.get("type") == "text":
                print(block.get("text"))
150+
151+
152+
def use_invoke_model_llama(stream=False):
    """Invoke Meta Llama 3 8B Instruct through the Bedrock InvokeModel API.

    Args:
        stream: When True, consume the streaming response (chunks discarded);
            otherwise return the parsed response body.

    Returns:
        The parsed JSON response body (dict) for the non-streaming call,
        None for the streaming path.
    """
    model_id = "meta.llama3-8b-instruct-v1:0"

    # Create request body.
    request_payload = json.dumps(
        {
            "prompt": "What is the capital of France?",
            "max_gen_len": 128,
            "temperature": 0.1,
            "top_p": 0.9,
        }
    )

    if not stream:
        response = brt.invoke_model(body=request_payload, modelId=model_id)
        return json.loads(response.get("body").read())

    response = brt.invoke_model_with_response_stream(
        body=request_payload, modelId=model_id
    )
    for event in response["body"]:
        piece = json.loads(event["chunk"]["bytes"])
        if "generation" in piece:
            # print(piece["generation"], end="")
            pass
180+
181+
182+
# print(get_foundation_models())
183+
def use_invoke_model_cohere():
    """Invoke Cohere Command R+ through the Bedrock InvokeModel API and print the reply."""
    # NOTE(review): the Bedrock model ID for Command R+ is versioned
    # ("cohere.command-r-plus-v1:0"); the unversioned form is rejected.
    model_id = "cohere.command-r-plus-v1:0"
    prompt = "What is the capital of France?"
    # NOTE(review): Command R / R+ expects the input under "message"
    # (chat-style request schema), not the legacy "prompt" field used by
    # the older Command text models — confirm against the AWS docs.
    body = json.dumps({"message": prompt, "max_tokens": 1024, "temperature": 0.1})
    response = brt.invoke_model(body=body, modelId=model_id)
    response_body = json.loads(response.get("body").read())
    print(response_body)
190+
191+
192+
def init_bedrock_langchain(temperature=0.1):
    """Build a streaming LangChain ChatBedrock client for Claude v2 and invoke it once.

    Args:
        temperature: Sampling temperature forwarded to the model.

    Returns:
        The LangChain message returned by the invocation.
    """
    llm = ChatBedrock(
        model_id="anthropic.claude-v2",
        streaming=True,
        model_kwargs={"temperature": temperature},
        region_name="us-east-1",
    )
    return llm.invoke("What is the capital of France?")

src/examples/gemini_example/main.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,16 +20,16 @@ async def async_demo():
2020

2121
def basic():
    """Run the plain non-streaming Gemini demo; other demos are toggled via comments."""
    generate()
    # generate(stream=True, with_tools=True)

    # image_to_text()
    # audio_to_text()
    # asyncio.run(async_demo())
2828

2929

3030
def generate(stream=False, with_tools=False):
3131
model = genai.GenerativeModel(
32-
"gemini-1.5-pro", system_instruction="You are a cat. Your name is Neko."
32+
"gemini-2.0-flash-exp", system_instruction="You are a cat. Your name is Neko."
3333
)
3434

3535
response = model.generate_content(
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
from .main import generate_content, generate_content_streaming
2+
3+
4+
class GoogleGenaiRunner:
    """Example entry point for the google-genai instrumentation demos."""

    def run(self):
        """Run the selected google-genai example; toggle the other via the comment."""
        # generate_content()
        generate_content_streaming()
Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
import os

from dotenv import load_dotenv
from google import genai

from langtrace_python_sdk import langtrace

# Load GEMINI_API_KEY from .env, then initialize tracing.
load_dotenv()
langtrace.init(write_spans_to_console=False)
def generate_content():
    """Send one prompt to Gemini 2.0 Flash (Google AI API) and print the reply text."""
    # Only run this block for Google AI API
    client = genai.Client(api_key=os.getenv("GEMINI_API_KEY"))
    result = client.models.generate_content(
        model="gemini-2.0-flash-exp",
        contents="What is your name?",
    )
    print(result.text)
18+
19+
20+
def generate_content_streaming():
    """Stream a Gemini 2.0 Flash response, draining chunks without printing.

    The chunks are intentionally discarded; the call exists to exercise the
    langtrace streaming instrumentation.
    """
    client = genai.Client(api_key=os.getenv("GEMINI_API_KEY"))
    stream = client.models.generate_content_stream(
        model="gemini-2.0-flash-exp",
        contents="What is your name?",
    )
    for _ in stream:
        pass

src/langtrace_python_sdk/constants/instrumentation/aws_bedrock.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,10 @@
11
from langtrace.trace_attributes import AWSBedrockMethods
22

33
APIS = {
4+
"INVOKE_MODEL": {
5+
"METHOD": "aws_bedrock.invoke_model",
6+
"ENDPOINT": "/invoke-model",
7+
},
48
"CONVERSE": {
59
"METHOD": AWSBedrockMethods.CONVERSE.value,
610
"ENDPOINT": "/converse",

src/langtrace_python_sdk/constants/instrumentation/common.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010

1111
SERVICE_PROVIDERS = {
1212
"ANTHROPIC": "Anthropic",
13+
"ARCH": "Arch",
1314
"AZURE": "Azure",
1415
"CHROMA": "Chroma",
1516
"CREWAI": "CrewAI",

src/langtrace_python_sdk/instrumentation/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
from .pymongo import PyMongoInstrumentation
2525
from .cerebras import CerebrasInstrumentation
2626
from .milvus import MilvusInstrumentation
27+
from .google_genai import GoogleGenaiInstrumentation
2728

2829
__all__ = [
2930
"AnthropicInstrumentation",
@@ -52,4 +53,5 @@
5253
"AWSBedrockInstrumentation",
5354
"CerebrasInstrumentation",
5455
"MilvusInstrumentation",
56+
"GoogleGenaiInstrumentation",
5557
]

0 commit comments

Comments
 (0)