Commit 0fe6825

merge with dev
2 parents b3441b6 + 6431547

File tree

22 files changed: +529 -24 lines

README.md

Lines changed: 1 addition & 0 deletions
@@ -246,6 +246,7 @@ Langtrace automatically captures traces from the following vendors:
 | Groq | LLM | :x: | :white_check_mark: |
 | Perplexity | LLM | :white_check_mark: | :white_check_mark: |
 | Gemini | LLM | :x: | :white_check_mark: |
+| Mistral | LLM | :x: | :white_check_mark: |
 | Langchain | Framework | :x: | :white_check_mark: |
 | LlamaIndex | Framework | :white_check_mark: | :white_check_mark: |
 | Langgraph | Framework | :x: | :white_check_mark: |
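
With this row added, Mistral joins the vendors Langtrace instruments automatically. A minimal sketch of what that looks like end to end (illustrative, not part of the commit; it assumes the mistralai package is installed and MISTRAL_API_KEY is set):

import os

from langtrace_python_sdk import langtrace
from mistralai import Mistral

langtrace.init()  # patches supported clients, including Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
response = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
)
print(response.choices[0].message.content)  # the call above is traced automatically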

pyproject.toml

Lines changed: 2 additions & 1 deletion
@@ -18,7 +18,7 @@ classifiers = [
     "Operating System :: OS Independent",
 ]
 dependencies = [
-    'trace-attributes==7.0.1',
+    'trace-attributes==7.0.4',
     'opentelemetry-api>=1.25.0',
     'opentelemetry-sdk>=1.25.0',
     'opentelemetry-instrumentation>=0.47b0',
@@ -54,6 +54,7 @@ dev = [
     "groq",
     "google-generativeai",
    "google-cloud-aiplatform",
+    "mistralai",
 ]

 test = ["pytest", "pytest-vcr", "pytest-asyncio"]
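
To sanity-check the bumped pin after installing, a small sketch using only the standard library (importlib.metadata, Python 3.8+):

from importlib.metadata import version

# should print 7.0.4 once this commit's dependencies are installed
print(version("trace-attributes"))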

src/examples/crewai_example/trip_planner/agents.py

Lines changed: 5 additions & 5 deletions
@@ -12,9 +12,9 @@ class TravelAgents:
     def __init__(self):
         self.OpenAIGPT35 = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.7)
         self.OpenAIGPT4 = ChatOpenAI(model_name="gpt-4", temperature=0.7)
-        self.Ollama = ChatOllama(model="openhermes")
+        self.Ollama = ChatOllama(model="llama3")
         self.Cohere = ChatCohere(model="command-r")
-        self.Anthropic = ChatAnthropic(model="claude-3-5-sonnet")
+        self.Anthropic = ChatAnthropic(model="claude-3-5-sonnet-20240620")

     def expert_travel_agent(self):
         return Agent(
@@ -28,7 +28,7 @@ def expert_travel_agent(self):
             # tools=[tool_1, tool_2],
             allow_delegation=False,
             verbose=True,
-            llm=self.OpenAIGPT4,
+            llm=self.Cohere,
         )

     def city_selection_expert(self):
@@ -39,7 +39,7 @@ def city_selection_expert(self):
             # tools=[tool_1, tool_2],
             allow_delegation=False,
             verbose=True,
-            llm=self.OpenAIGPT4,
+            llm=self.Cohere,
         )

     def local_tour_guide(self):
@@ -50,5 +50,5 @@ def local_tour_guide(self):
             # tools=[tool_1, tool_2],
             allow_delegation=False,
             verbose=True,
-            llm=self.OpenAIGPT4,
+            llm=self.Cohere,
         )
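
These now-Cohere-backed agents get wired into tasks elsewhere in the trip_planner example; a hypothetical sketch of that wiring (the Task and Crew fields follow common crewAI usage and are assumptions, not part of this diff):

from crewai import Crew, Task

agents = TravelAgents()
planner = agents.expert_travel_agent()

itinerary = Task(
    description="Draft a 3-day itinerary for Lisbon.",
    expected_output="A day-by-day itinerary in markdown.",
    agent=planner,
)
crew = Crew(agents=[planner], tasks=[itinerary])
print(crew.kickoff())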
Lines changed: 98 additions & 0 deletions
@@ -0,0 +1,98 @@
+from langtrace_python_sdk import with_langtrace_root_span, langtrace
+from dotenv import load_dotenv
+from litellm import completion, acompletion
+import litellm
+import asyncio
+
+load_dotenv()
+
+
+litellm.success_callback = ["langtrace"]
+langtrace.init()
+litellm.set_verbose = False
+
+
+@with_langtrace_root_span("Litellm Example OpenAI")
+def openAI(streaming=False):
+    response = completion(
+        model="gpt-3.5-turbo",
+        messages=[
+            {"content": "respond only in Yoda speak.", "role": "system"},
+            {"content": "Hello, how are you?", "role": "user"},
+        ],
+        stream=streaming,
+        stream_options={"include_usage": True},
+    )
+    if streaming:
+        for _ in response:
+            pass
+    else:
+        return response
+
+
+# @with_langtrace_root_span("Litellm Example Anthropic Completion")
+def anthropic(streaming=False):
+    try:
+        response = completion(
+            model="claude-2.1",
+            messages=[
+                {"content": "respond only in Yoda speak.", "role": "system"},
+                {"content": "what is 2 + 2?", "role": "user"},
+            ],
+            temperature=0.5,
+            top_p=0.5,
+            n=1,
+            stream=streaming,
+            stream_options={"include_usage": True},
+        )
+        # print(response)
+        if streaming:
+            for _ in response:
+                pass
+        else:
+            return response
+    except Exception as e:
+        print("ERROR", e)
+
+
+# @with_langtrace_root_span("Litellm Example OpenAI Async Streaming")
+async def async_anthropic(streaming=False):
+    response = await acompletion(
+        model="claude-2.1",
+        messages=[{"content": "Hello, how are you?", "role": "user"}],
+        stream=streaming,
+        stream_options={"include_usage": True},
+        temperature=0.5,
+        top_p=0.5,
+        n=1,
+    )
+    if streaming:
+        async for _ in response:
+            pass
+    else:
+        return response
+
+
+def cohere(streaming=False):
+    response = completion(
+        model="command-r",
+        messages=[
+            {"content": "respond only in Yoda speak.", "role": "system"},
+            {"content": "Hello, how are you?", "role": "user"},
+        ],
+        stream=streaming,
+        stream_options={"include_usage": True},
+    )
+    if streaming:
+        for _ in response:
+            pass
+    else:
+        return response
+
+
+if __name__ == "__main__":
+    # openAI()
+    anthropic(streaming=False)
+    cohere(streaming=True)
+    # asyncio.run(async_anthropic(streaming=True))
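
In the example above, streamed responses are drained with bare for loops; with stream_options={"include_usage": True}, the final chunk carries token usage, which is what makes usage available to the Langtrace callback even when streaming. A hypothetical helper (not in the commit) that drains a stream while also printing the text:

def print_stream(response):
    # the trailing usage chunk has no choices, so guard before indexing
    for chunk in response:
        if chunk.choices and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")
    print()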
Lines changed: 14 additions & 0 deletions
@@ -0,0 +1,14 @@
+import asyncio
+from examples.mistral_example.complete import chat_complete
+from examples.mistral_example.complete_async import complete_async
+from examples.mistral_example.embeddings import embeddings_create
+from langtrace_python_sdk import langtrace, with_langtrace_root_span
+
+langtrace.init()
+
+class MistralRunner:
+    @with_langtrace_root_span("Mistral")
+    def run(self):
+        chat_complete()
+        asyncio.run(complete_async())
+        embeddings_create()
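
A minimal sketch of driving the runner (the entry-point guard is an assumption; the diff does not show how this module is invoked):

if __name__ == "__main__":
    MistralRunner().run()  # all three Mistral calls share the "Mistral" root span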
Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
+import os
+from langtrace_python_sdk import with_langtrace_root_span
+from mistralai import Mistral
+
+@with_langtrace_root_span("chat_complete")
+def chat_complete():
+    model = "mistral-large-latest"
+    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+    chat_response = client.chat.complete(
+        model=model,
+        messages=[
+            {
+                "role": "user",
+                "content": "I need 10 cocktail recipes with tequila other than the classics like margarita, tequila",
+            },
+        ],
+    )
+    print(chat_response.choices[0].message.content)
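
The non-streaming complete call above has a streaming counterpart; a sketch assuming the mistralai v1 client.chat.stream interface (an assumption about the SDK's surface, not shown in this commit):

import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
stream = client.chat.stream(
    model="mistral-large-latest",
    messages=[{"role": "user", "content": "One more tequila cocktail, please."}],
)
for chunk in stream:
    # each event wraps a completion chunk; delta.content carries the text increment
    if chunk.data.choices[0].delta.content:
        print(chunk.data.choices[0].delta.content, end="")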
Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
+import os
+from langtrace_python_sdk import with_langtrace_root_span
+from mistralai import Mistral
+
+@with_langtrace_root_span("chat_complete_async")
+async def complete_async():
+    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+    res = await client.chat.complete_async(
+        model="mistral-small-latest",
+        messages=[
+            {"content": "Which locations should I visit when I travel to New York? Answer in one short sentence.", "role": "user"},
+        ],
+    )
+    if res is not None:
+        # handle response
+        print(res.choices[0].message.content)
Lines changed: 17 additions & 0 deletions
@@ -0,0 +1,17 @@
+import os
+from langtrace_python_sdk import with_langtrace_root_span
+from mistralai import Mistral
+
+
+@with_langtrace_root_span("create_embeddings")
+def embeddings_create():
+    model = "mistral-embed"
+
+    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+
+    embeddings_batch_response = client.embeddings.create(
+        model=model,
+        inputs=["Embed this sentence.", "As well as this one."],
+    )
+
+    print(embeddings_batch_response.data[0].embedding)
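
Each item in data holds one embedding vector; a quick, self-contained sketch of comparing the two vectors with cosine similarity (stdlib only; the variable names in the trailing comment are hypothetical):

import math

def cosine(a, b):
    # dot product over the product of Euclidean norms
    dot = sum(x * y for x, y in zip(a, b))
    norms = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
    return dot / norms

# e.g. v1, v2 = embeddings_batch_response.data[0].embedding, embeddings_batch_response.data[1].embedding
# print(f"cosine similarity: {cosine(v1, v2):.4f}")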

src/examples/weaviate_example/query_text.py

Lines changed: 12 additions & 6 deletions
@@ -15,13 +15,14 @@
 import requests
 import weaviate
 import weaviate.classes as wvc
+import weaviate.classes.config as wc
+from dotenv import load_dotenv
 from weaviate.classes.aggregate import GroupByAggregate
 from weaviate.classes.query import Filter, HybridFusion, MetadataQuery
 from weaviate.collections.classes.grpc import Move

 import langtrace_python_sdk.langtrace as langtrace
 from langtrace_python_sdk import with_langtrace_root_span
-from dotenv import load_dotenv

 load_dotenv()
 # Set these environment variables
@@ -44,11 +45,16 @@ def create():
     if not client.collections.get("Question"):
         questions = client.collections.create(
             name="Question",
-            # Set the vectorizer to "text2vec-openai" to use the OpenAI API for vector-related operations
-            vectorizer_config=wvc.config.Configure.Vectorizer.text2vec_openai(),
-            # Ensure the `generative-openai` module is used for generative queries
-            generative_config=wvc.config.Configure.Generative.openai(),
-        )
+            properties=[
+                wc.Property(name="answer", data_type=wc.DataType.TEXT),
+                wc.Property(name="question", data_type=wc.DataType.TEXT),
+                wc.Property(name="category", data_type=wc.DataType.TEXT),
+            ],
+            # Define the vectorizer module
+            vectorizer_config=wc.Configure.Vectorizer.text2vec_openai(),
+            # Define the generative module
+            generative_config=wc.Configure.Generative.openai(),
+        )


 @with_langtrace_root_span("insert")
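
Since the collection now declares explicit properties, a short sketch of inserting an object that matches the new schema (illustrative only; it reuses the connected client from the example and assumes the weaviate v4 data.insert call):

questions = client.collections.get("Question")
questions.data.insert(
    properties={
        "question": "This continent contains the Sahara desert.",
        "answer": "Africa",
        "category": "GEOGRAPHY",
    }
)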

src/langtrace_python_sdk/constants/instrumentation/common.py

Lines changed: 1 addition & 0 deletions
@@ -29,6 +29,7 @@
     "OLLAMA": "Ollama",
     "VERTEXAI": "VertexAI",
     "GEMINI": "Gemini",
+    "MISTRAL": "Mistral",
 }

 LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY = "langtrace_additional_attributes"
