Skip to content

Commit 55645e8

Browse files
committed
Merge branch 'development' of github.com:Scale3-Labs/langtrace-python-sdk into obinna/S3EN-2344-instrument-mistral
2 parents 3e5104c + 0707958 commit 55645e8

File tree

6 files changed

+142
-21
lines changed

6 files changed

+142
-21
lines changed

src/examples/crewai_example/trip_planner/agents.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,9 +12,9 @@ class TravelAgents:
1212
def __init__(self):
1313
self.OpenAIGPT35 = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.7)
1414
self.OpenAIGPT4 = ChatOpenAI(model_name="gpt-4", temperature=0.7)
15-
self.Ollama = ChatOllama(model="openhermes")
15+
self.Ollama = ChatOllama(model="llama3")
1616
self.Cohere = ChatCohere(model="command-r")
17-
self.Anthropic = ChatAnthropic(model="claude-3-5-sonnet")
17+
self.Anthropic = ChatAnthropic(model="claude-3-5-sonnet-20240620")
1818

1919
def expert_travel_agent(self):
2020
return Agent(
@@ -28,7 +28,7 @@ def expert_travel_agent(self):
2828
# tools=[tool_1, tool_2],
2929
allow_delegation=False,
3030
verbose=True,
31-
llm=self.OpenAIGPT4,
31+
llm=self.Cohere,
3232
)
3333

3434
def city_selection_expert(self):
@@ -39,7 +39,7 @@ def city_selection_expert(self):
3939
# tools=[tool_1, tool_2],
4040
allow_delegation=False,
4141
verbose=True,
42-
llm=self.OpenAIGPT4,
42+
llm=self.Cohere,
4343
)
4444

4545
def local_tour_guide(self):
@@ -50,5 +50,5 @@ def local_tour_guide(self):
5050
# tools=[tool_1, tool_2],
5151
allow_delegation=False,
5252
verbose=True,
53-
llm=self.OpenAIGPT4,
53+
llm=self.Cohere,
5454
)
Lines changed: 98 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,98 @@
1+
# Standard library
import asyncio

# Third-party / SDK
import litellm
from dotenv import load_dotenv
from litellm import acompletion, completion

from langtrace_python_sdk import langtrace, with_langtrace_root_span

# Pull API keys / Langtrace configuration from a local .env file.
load_dotenv()

# Register Langtrace as a LiteLLM success callback, then initialize the SDK.
litellm.success_callback = ["langtrace"]
langtrace.init()
litellm.set_verbose = False
13+
14+
15+
@with_langtrace_root_span("Litellm Example OpenAI")
def openAI(streaming=False):
    """Run a chat completion against gpt-3.5-turbo via LiteLLM.

    Args:
        streaming: when True, the streamed chunks are consumed and
            discarded (the traces are emitted via the success callback);
            when False, the full response object is returned.
    """
    messages = [
        {"content": "respond only in Yoda speak.", "role": "system"},
        {"content": "Hello, how are you?", "role": "user"},
    ]
    response = completion(
        model="gpt-3.5-turbo",
        messages=messages,
        stream=streaming,
        stream_options={"include_usage": True},
    )
    if not streaming:
        return response
    # Drain the stream so the callback sees the whole generation.
    for _chunk in response:
        pass
31+
32+
33+
# @with_langtrace_root_span("Litellm Example Anthropic Completion")
def anthropic(streaming=False):
    """Run a chat completion against claude-2.1 via LiteLLM.

    Best-effort by design: any provider/network error is printed rather
    than raised so the example script can continue with other providers.

    Args:
        streaming: when True, consume and discard the streamed chunks;
            when False, return the full response object.
    """
    try:
        response = completion(
            model="claude-2.1",
            messages=[
                {"content": "respond only in Yoda speak.", "role": "system"},
                {"content": "what is 2 + 2?", "role": "user"},
            ],
            temperature=0.5,
            top_p=0.5,
            n=1,
            stream=streaming,
            stream_options={"include_usage": True},
        )
        if streaming:
            for _ in response:
                pass
        else:
            return response
    except Exception as e:
        # Fixed misspelled label ("ERORRRR"); kept the broad catch on
        # purpose — this is a demo that should not abort the whole run.
        print("ERROR", e)
57+
58+
59+
# @with_langtrace_root_span("Litellm Example OpenAI Async Streaming")
async def async_anthropic(streaming=False):
    """Asynchronously query claude-2.1 via LiteLLM.

    Args:
        streaming: when True, drain and discard the async stream;
            when False, return the complete response object.
    """
    result = await acompletion(
        model="claude-2.1",
        messages=[{"content": "Hello, how are you?", "role": "user"}],
        stream=streaming,
        stream_options={"include_usage": True},
        temperature=0.5,
        top_p=0.5,
        n=1,
    )
    if not streaming:
        return result
    # Consume the stream so usage/trace data is fully reported.
    async for _chunk in result:
        pass
75+
76+
77+
def cohere(streaming=False):
    """Query Cohere's command-r model via LiteLLM.

    Args:
        streaming: when True, consume and discard the streamed chunks;
            when False, return the full response object.
    """
    request = dict(
        model="command-r",
        messages=[
            {"content": "respond only in Yoda speak.", "role": "system"},
            {"content": "Hello, how are you?", "role": "user"},
        ],
        stream=streaming,
        stream_options={"include_usage": True},
    )
    response = completion(**request)
    if not streaming:
        return response
    for _chunk in response:
        pass
92+
93+
94+
if __name__ == "__main__":
    # Exercise a subset of the providers above; the commented lines are
    # alternative entry points to try manually.
    # openAI()
    anthropic(streaming=False)
    cohere(streaming=True)
    # asyncio.run(async_anthropic(streaming=True))

src/examples/weaviate_example/query_text.py

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -15,13 +15,14 @@
1515
import requests
1616
import weaviate
1717
import weaviate.classes as wvc
18+
import weaviate.classes.config as wc
19+
from dotenv import load_dotenv
1820
from weaviate.classes.aggregate import GroupByAggregate
1921
from weaviate.classes.query import Filter, HybridFusion, MetadataQuery
2022
from weaviate.collections.classes.grpc import Move
2123

2224
import langtrace_python_sdk.langtrace as langtrace
2325
from langtrace_python_sdk import with_langtrace_root_span
24-
from dotenv import load_dotenv
2526

2627
load_dotenv()
2728
# Set these environment variables
@@ -44,11 +45,16 @@ def create():
4445
if not client.collections.get("Question"):
4546
questions = client.collections.create(
4647
name="Question",
47-
# Set the vectorizer to "text2vec-openai" to use the OpenAI API for vector-related operations
48-
vectorizer_config=wvc.config.Configure.Vectorizer.text2vec_openai(),
49-
# Ensure the `generative-openai` module is used for generative queries
50-
generative_config=wvc.config.Configure.Generative.openai(),
51-
)
48+
properties=[
49+
wc.Property(name="answer", data_type=wc.DataType.TEXT),
50+
wc.Property(name="question", data_type=wc.DataType.TEXT),
51+
wc.Property(name="category", data_type=wc.DataType.TEXT),
52+
],
53+
# Define the vectorizer module
54+
vectorizer_config=wc.Configure.Vectorizer.text2vec_openai(),
55+
# Define the generative module
56+
generative_config=wc.Configure.Generative.openai(),
57+
)
5258

5359

5460
@with_langtrace_root_span("insert")

src/langtrace_python_sdk/instrumentation/weaviate/instrumentation.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ class WeaviateInstrumentation(BaseInstrumentor):
3737
"""
3838

3939
def instrumentation_dependencies(self) -> Collection[str]:
    """Return the packages (with minimum versions) this instrumentation requires."""
    required = [
        "weaviate-client >= 4.6.1",
        "trace-attributes >= 7.0.3",
    ]
    return required
4141

4242
def _instrument(self, **kwargs):
4343
tracer_provider = kwargs.get("tracer_provider")

src/langtrace_python_sdk/instrumentation/weaviate/patch.py

Lines changed: 22 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -16,21 +16,21 @@
1616

1717
import json
1818

19+
from importlib_metadata import version as v
1920
from langtrace.trace_attributes import DatabaseSpanAttributes
20-
from langtrace_python_sdk.utils.llm import get_span_name
2121
from opentelemetry import baggage, trace
2222
from opentelemetry.trace import SpanKind
23-
from opentelemetry.trace.status import Status, StatusCode
2423
from opentelemetry.trace.propagation import set_span_in_context
24+
from opentelemetry.trace.status import Status, StatusCode
25+
26+
from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
2527
from langtrace_python_sdk.constants.instrumentation.common import (
2628
LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
2729
SERVICE_PROVIDERS,
2830
)
2931
from langtrace_python_sdk.constants.instrumentation.weaviate import APIS
32+
from langtrace_python_sdk.utils.llm import get_span_name
3033
from langtrace_python_sdk.utils.misc import extract_input_params, to_iso_format
31-
from importlib_metadata import version as v
32-
33-
from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
3434

3535
# Predefined metadata response attributes
3636
METADATA_ATTRIBUTES = [
@@ -45,6 +45,22 @@
4545
]
4646

4747

48+
def extract_inputs(args, kwargs):
    """Serialize call inputs for use as span attributes.

    Delegates to ``extract_input_params`` for everything except the
    ``properties`` kwarg: Weaviate ``Property`` objects expose
    ``_to_dict``, so each such entry is converted to a plain dict and
    the resulting list is JSON-encoded under the ``properties`` key.
    """
    params = {}
    without_props = {key: val for key, val in kwargs.items() if key != "properties"}
    params.update(extract_input_params(args, without_props))

    props = kwargs.get("properties", None)
    if props:
        serializable = [p._to_dict() for p in props if hasattr(p, "_to_dict")]
        params["properties"] = json.dumps(serializable)
    return params
62+
63+
4864
def extract_metadata(metadata):
4965
# Extraction response Query metadata
5066
extracted_metadata = {
@@ -126,7 +142,7 @@ def traced_method(wrapped, instance, args, kwargs):
126142
"db.system": "weaviate",
127143
"db.operation": api["OPERATION"],
128144
"db.collection.name": collection_name,
129-
"db.query": json.dumps(extract_input_params(args, kwargs)),
145+
"db.query": json.dumps(extract_inputs(args, kwargs)),
130146
**(extra_attributes if extra_attributes is not None else {}),
131147
}
132148

@@ -143,7 +159,6 @@ def traced_method(wrapped, instance, args, kwargs):
143159
try:
144160
# Attempt to call the original method
145161
result = wrapped(*args, **kwargs)
146-
print(result)
147162
if api["OPERATION"] in ["query", "generate"]:
148163
span.add_event(
149164
name="db.response",

src/langtrace_python_sdk/langtrace.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ def init(
7373
disable_instrumentations: Optional[DisableInstrumentations] = None,
7474
disable_tracing_for_functions: Optional[InstrumentationMethods] = None,
7575
service_name: Optional[str] = None,
76-
disable_logging = False
76+
disable_logging=False,
7777
):
7878
if disable_logging:
7979
sys.stdout = open(os.devnull, "w")
@@ -94,7 +94,9 @@ def init(
9494
provider = TracerProvider(resource=resource, sampler=sampler)
9595

9696
remote_write_exporter = (
97-
LangTraceExporter(api_key=api_key, api_host=host, disable_logging=disable_logging)
97+
LangTraceExporter(
98+
api_key=api_key, api_host=host, disable_logging=disable_logging
99+
)
98100
if custom_remote_exporter is None
99101
else custom_remote_exporter
100102
)

0 commit comments

Comments
 (0)