Skip to content

Commit da82f72

Browse files
committed
bump version
2 parents 372ae29 + a26ac03 commit da82f72

File tree

20 files changed

+441
-160
lines changed

20 files changed

+441
-160
lines changed

src/examples/cohere_example/rerank.py

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import cohere
22
from dotenv import find_dotenv, load_dotenv
3+
from datetime import datetime
34

45
from langtrace_python_sdk import langtrace
56

@@ -16,10 +17,22 @@
1617
# @with_langtrace_root_span("embed_create")
1718
def rerank():
1819
docs = [
19-
"Carson City is the capital city of the American state of Nevada.",
20-
"The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan.",
21-
"Washington, D.C. (also known as simply Washington or D.C., and officially as the District of Columbia) is the capital of the United States. It is a federal district.",
22-
"Capital punishment (the death penalty) has existed in the United States since beforethe United States was a country. As of 2017, capital punishment is legal in 30 of the 50 states.",
20+
{
21+
"text": "Carson City is the capital city of the American state of Nevada.",
22+
"date": datetime.now(),
23+
},
24+
{
25+
"text": "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan.",
26+
"date": datetime(2020, 5, 17),
27+
},
28+
{
29+
"text": "Washington, D.C. (also known as simply Washington or D.C., and officially as the District of Columbia) is the capital of the United States. It is a federal district.",
30+
"date": datetime(1776, 7, 4),
31+
},
32+
{
33+
"text": "Capital punishment (the death penalty) has existed in the United States since before the United States was a country. As of 2017, capital punishment is legal in 30 of the 50 states.",
34+
"date": datetime(2023, 9, 14),
35+
},
2336
]
2437

2538
response = co.rerank(
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
import dspy
from langtrace_python_sdk import langtrace
from dotenv import load_dotenv

load_dotenv()

# Instrument only the dspy and anthropic integrations; everything else is disabled.
langtrace.init(disable_instrumentations={"all_except": ["dspy", "anthropic"]})

# configure the language model to be used by dspy
llm = dspy.Claude()
dspy.settings.configure(lm=llm)

# A bare string signature: the model maps a question to an answer.
predict = dspy.Predict("question -> answer")
prediction = predict(
    question="who scored the final goal in football world cup finals in 2014?"
)

print(prediction.answer)
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
import dspy
from langtrace_python_sdk import langtrace
from dotenv import load_dotenv

load_dotenv()

# Instrument only the dspy and anthropic integrations; everything else is disabled.
langtrace.init(disable_instrumentations={"all_except": ["dspy", "anthropic"]})

# configure the language model to be used by dspy
llm = dspy.Claude()
dspy.settings.configure(lm=llm)


class BasicQA(dspy.Signature):
    """Given a question, generate the answer."""

    # Field descriptions become part of the prompt dspy builds.
    question = dspy.InputField(desc="User's question")
    answer = dspy.OutputField(desc="often between 1 and 5 words")


# Chain-of-thought prompting: the model reasons step by step before answering.
predict = dspy.ChainOfThought(BasicQA)
prediction = predict(
    question="Who provided the assist for the final goal in the 2014 FIFA World Cup final?"
)

print(prediction.answer)
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
import dspy
from langtrace_python_sdk import langtrace
from dotenv import load_dotenv

load_dotenv()

# Instrument only the dspy and anthropic integrations; everything else is disabled.
langtrace.init(disable_instrumentations={"all_except": ["dspy", "anthropic"]})

# configure the language model to be used by dspy
llm = dspy.Claude()
dspy.settings.configure(lm=llm)


class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""

    # Field descriptions become part of the prompt dspy builds.
    question = dspy.InputField(
        desc="A question that can be answered with a short factoid answer"
    )
    answer = dspy.OutputField(desc="often between 1 and 5 words")


# Plain Predict (no chain of thought) over the class-based signature.
predict = dspy.Predict(BasicQA)
prediction = predict(
    question="Sarah has 5 apples. She buys 7 more apples from the store. How many apples does Sarah have now?"
)

print(prediction.answer)
Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
import dspy
from langtrace_python_sdk import langtrace, with_langtrace_root_span
from dotenv import load_dotenv

load_dotenv()

# Instrument only the dspy and anthropic integrations; everything else is disabled.
langtrace.init(disable_instrumentations={"all_except": ["dspy", "anthropic"]})

# configure the language model to be used by dspy
llm = dspy.Claude()
dspy.settings.configure(lm=llm)


class BasicQA(dspy.Signature):
    """Given a question, generate the answer."""

    question = dspy.InputField(desc="User's question")
    answer = dspy.OutputField(desc="often between 1 and 5 words")


class DoubleChainOfThought(dspy.Module):
    """Two chained CoT steps: derive an intermediate thought, then condense
    it into a one-word answer."""

    def __init__(self):
        # Fix: dspy.Module subclasses must initialize the base class before
        # assigning sub-modules, otherwise module bookkeeping (parameter /
        # predictor discovery) is broken.
        super().__init__()
        self.cot1 = dspy.ChainOfThought("question -> step_by_step_thought")
        self.cot2 = dspy.ChainOfThought("question, thought -> one_word_answer")

    def forward(self, question):
        # First pass produces the reasoning; second pass consumes it.
        thought = self.cot1(question=question).step_by_step_thought
        answer = self.cot2(question=question, thought=thought).one_word_answer
        return dspy.Prediction(thought=thought, answer=answer)


@with_langtrace_root_span(name="Double Chain Of thought")
def main():
    multi_step_question = "what is the capital of the birth state of the person who provided the assist for the Mario Gotze's in football world cup in 2014?"
    double_cot = DoubleChainOfThought()
    result = double_cot(question=multi_step_question)
    print(result)


main()

src/examples/langchain_example/__init__.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
from .basic import basic_app, rag, load_and_split
33
from langtrace_python_sdk import with_langtrace_root_span
44

5-
from .groq_example import groq_basic, groq_streaming
5+
from .groq_example import groq_basic, groq_tool_choice, groq_streaming
66
from .langgraph_example_tools import basic_graph_tools
77

88

@@ -20,3 +20,5 @@ class GroqRunner:
2020
@with_langtrace_root_span("Groq")
2121
def run(self):
2222
groq_streaming()
23+
groq_basic()
24+
groq_tool_choice()

src/examples/langchain_example/groq_example.py

Lines changed: 78 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
1+
import json
2+
13
from dotenv import find_dotenv, load_dotenv
2-
from langchain_core.prompts import ChatPromptTemplate
3-
from langchain_groq import ChatGroq
44
from groq import Groq
55

66
_ = load_dotenv(find_dotenv())
@@ -30,6 +30,82 @@ def groq_basic():
3030
return chat_completion
3131

3232

33+
def groq_tool_choice():
    """Demonstrate forced tool choice with Groq.

    Sends a math question, forces the model to call the `calculate` tool,
    executes the tool locally, feeds the result back, and returns the
    model's final natural-language answer (or None if, unexpectedly, no
    tool call was produced).
    """

    user_prompt = "What is 25 * 4 + 10?"
    MODEL = "llama3-groq-70b-8192-tool-use-preview"

    def calculate(expression):
        """Evaluate a mathematical expression"""
        # NOTE(security): eval() executes arbitrary code and the expression
        # comes from the LLM, i.e. untrusted input. Acceptable only in an
        # example script; use an AST-based evaluator in production.
        try:
            result = eval(expression)
            return json.dumps({"result": result})
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            return json.dumps({"error": "Invalid expression"})

    messages = [
        {
            "role": "system",
            "content": "You are a calculator assistant. Use the calculate function to perform mathematical operations and provide the results.",
        },
        {
            "role": "user",
            "content": user_prompt,
        },
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "calculate",
                "description": "Evaluate a mathematical expression",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "expression": {
                            "type": "string",
                            "description": "The mathematical expression to evaluate",
                        }
                    },
                    "required": ["expression"],
                },
            },
        }
    ]
    # tool_choice forces the model to call `calculate` rather than answer directly.
    response = client.chat.completions.create(
        model=MODEL,
        messages=messages,
        tools=tools,
        tool_choice={"type": "function", "function": {"name": "calculate"}},
        max_tokens=4096,
    )

    response_message = response.choices[0].message
    tool_calls = response_message.tool_calls
    if tool_calls:
        available_functions = {
            "calculate": calculate,
        }
        messages.append(response_message)
        for tool_call in tool_calls:
            function_name = tool_call.function.name
            function_to_call = available_functions[function_name]
            function_args = json.loads(tool_call.function.arguments)
            function_response = function_to_call(
                expression=function_args.get("expression")
            )
            # Tool results go back to the model as `role: tool` messages,
            # keyed by the originating tool_call_id.
            messages.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": function_name,
                    "content": function_response,
                }
            )
        second_response = client.chat.completions.create(model=MODEL, messages=messages)
        return second_response.choices[0].message.content
107+
108+
33109
def groq_streaming():
34110
chat_completion = client.chat.completions.create(
35111
messages=[

src/langtrace_python_sdk/extensions/langtrace_exporter.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,7 @@ def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
124124
url=f"{self.api_host}",
125125
data=json.dumps(data),
126126
headers=headers,
127-
timeout=20,
127+
timeout=40,
128128
)
129129

130130
if not response.ok:

src/langtrace_python_sdk/instrumentation/anthropic/patch.py

Lines changed: 6 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -14,10 +14,8 @@
1414
limitations under the License.
1515
"""
1616

17-
from typing import Any, Callable, Dict, List, Optional, Iterator, TypedDict, Union
18-
from langtrace.trace_attributes import Event, SpanAttributes, LLMSpanAttributes
19-
from langtrace_python_sdk.utils import set_span_attribute
20-
from langtrace_python_sdk.utils.silently_fail import silently_fail
17+
from typing import Any, Callable, List, Iterator, Union
18+
from langtrace.trace_attributes import SpanAttributes, LLMSpanAttributes
2119
import json
2220

2321
from langtrace_python_sdk.utils.llm import (
@@ -28,6 +26,7 @@
2826
get_llm_url,
2927
get_span_name,
3028
set_event_completion,
29+
set_span_attributes,
3130
set_usage_attributes,
3231
set_span_attribute,
3332
)
@@ -39,8 +38,6 @@
3938
StreamingResult,
4039
ResultType,
4140
MessagesCreateKwargs,
42-
ContentItem,
43-
Usage,
4441
)
4542

4643

@@ -62,22 +59,20 @@ def traced_method(
6259
prompts = [{"role": "system", "content": system}] + kwargs.get(
6360
"messages", []
6461
)
65-
extraAttributes = get_extra_attributes()
6662
span_attributes = {
6763
**get_langtrace_attributes(version, service_provider),
6864
**get_llm_request_attributes(kwargs, prompts=prompts),
6965
**get_llm_url(instance),
7066
SpanAttributes.LLM_PATH: APIS["MESSAGES_CREATE"]["ENDPOINT"],
71-
**extraAttributes, # type: ignore
67+
**get_extra_attributes(),
7268
}
7369

7470
attributes = LLMSpanAttributes(**span_attributes)
7571

7672
span = tracer.start_span(
7773
name=get_span_name(APIS["MESSAGES_CREATE"]["METHOD"]), kind=SpanKind.CLIENT
7874
)
79-
for field, value in attributes.model_dump(by_alias=True).items():
80-
set_span_attribute(span, field, value)
75+
set_span_attributes(span, attributes)
8176
try:
8277
# Attempt to call the original method
8378
result = wrapped(*args, **kwargs)
@@ -112,7 +107,7 @@ def set_response_attributes(
112107
if typ == "text":
113108
content = result.content[0].text
114109
set_event_completion(
115-
span, [{"type": typ, role: role, content: content}]
110+
span, [{"type": typ, "role": role, "content": content}]
116111
)
117112

118113
if (

src/langtrace_python_sdk/instrumentation/autogen/instrumentation.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,15 +10,9 @@ def instrumentation_dependencies(self):
1010
return ["autogen >= 0.1.0"]
1111

1212
def _instrument(self, **kwargs):
13-
print("Instrumneting autogen")
1413
tracer_provider = kwargs.get("tracer_provider")
1514
tracer = get_tracer(__name__, "", tracer_provider)
1615
version = v("autogen")
17-
# conversable_agent.intiate_chat
18-
# conversable_agent.register_function
19-
# agent.Agent
20-
# AgentCreation
21-
# Tools --> Register_for_llm, register_for_execution, register_for_function
2216
try:
2317
_W(
2418
module="autogen.agentchat.conversable_agent",

0 commit comments

Comments
 (0)