Skip to content

Commit a8c6cc4

Browse files
Release 2.2.3 (#250)
* remove logs * remove requirements * Bump version * DSPy - Bugfixes and update to dspy-ai (#246) * Bugfix DSPy instrumentation * Add example for parallel execution * Bump version * chore: add back openai tool choice arg (#245) * chore: add back openai tool choice arg * style: fix formating * Allow DSPy span naming (#249) * Update example * Allow span naming using langtrace.span.name * Bump version --------- Co-authored-by: darshit-s3 <[email protected]>
1 parent e0e96b1 commit a8c6cc4

File tree

7 files changed

+109
-20
lines changed

7 files changed

+109
-20
lines changed

src/examples/dspy_example/math_problems_cot_parallel.py

Lines changed: 6 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
1+
import contextvars
12
import dspy
23
from dspy.datasets.gsm8k import GSM8K, gsm8k_metric
34
from dspy.teleprompt import BootstrapFewShot
45
from concurrent.futures import ThreadPoolExecutor
5-
from opentelemetry.context import get_current, attach, detach
66

77
# flake8: noqa
88
from langtrace_python_sdk import langtrace, with_langtrace_root_span
@@ -22,7 +22,8 @@ def __init__(self):
2222
self.prog = dspy.ChainOfThought("question -> answer")
2323

2424
def forward(self, question):
25-
return self.prog(question=question)
25+
result = inject_additional_attributes(lambda: self.prog(question=question), {'langtrace.span.name': 'MathProblemsCotParallel'})
26+
return result
2627

2728
@with_langtrace_root_span(name="parallel_example")
2829
def example():
@@ -34,21 +35,12 @@ def example():
3435
optimized_cot = teleprompter.compile(CoT(), trainset=gsm8k_trainset)
3536

3637
questions = [
37-
"What is the cosine of 0?",
38-
"What is the tangent of 0?",
38+
"What is the sine of 0?",
39+
"What is the tangent of 100?",
3940
]
4041

41-
current_context = get_current()
42-
43-
def run_with_context(context, func, *args, **kwargs):
44-
token = attach(context)
45-
try:
46-
return func(*args, **kwargs)
47-
finally:
48-
detach(token)
49-
5042
with ThreadPoolExecutor(max_workers=2) as executor:
51-
futures = [executor.submit(run_with_context, current_context, optimized_cot, question=q) for q in questions]
43+
futures = [executor.submit(contextvars.copy_context().run, optimized_cot, question=q) for q in questions]
5244

5345
for future in futures:
5446
ans = future.result()
Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
# Example taken from https://platform.openai.com/docs/guides/function-calling
2+
import json
3+
4+
from dotenv import find_dotenv, load_dotenv
5+
from openai import OpenAI
6+
7+
from langtrace_python_sdk import langtrace
8+
9+
client = OpenAI()
10+
11+
_ = load_dotenv(find_dotenv())
12+
13+
langtrace.init(
14+
write_spans_to_console=True,
15+
)
16+
17+
18+
# Example dummy function hard coded to return the same weather
19+
# In production, this could be your backend API or an external API
20+
def get_current_weather(location, unit="fahrenheit"):
21+
"""Get the current weather in a given location"""
22+
if "tokyo" in location.lower():
23+
return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
24+
elif "san francisco" in location.lower():
25+
return json.dumps(
26+
{"location": "San Francisco", "temperature": "72", "unit": unit}
27+
)
28+
elif "paris" in location.lower():
29+
return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
30+
else:
31+
return json.dumps({"location": location, "temperature": "unknown"})
32+
33+
34+
def run_conversation():
35+
# Step 1: send the conversation and available functions to the model
36+
messages = [
37+
{
38+
"role": "user",
39+
"content": "What's the weather like in San Francisco, Tokyo, and Paris?",
40+
}
41+
]
42+
tools = [
43+
{
44+
"type": "function",
45+
"function": {
46+
"name": "get_current_weather",
47+
"description": "Get the current weather in a given location",
48+
"parameters": {
49+
"type": "object",
50+
"properties": {
51+
"location": {
52+
"type": "string",
53+
"description": "The city and state, e.g. San Francisco, CA",
54+
},
55+
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
56+
},
57+
"required": ["location"],
58+
},
59+
},
60+
}
61+
]
62+
response = client.chat.completions.create(
63+
model="gpt-4o",
64+
messages=messages,
65+
tools=tools,
66+
tool_choice="required", # auto is default, but we'll be explicit
67+
)
68+
response_message = response.choices[0].message
69+
tool_calls = response_message.tool_calls
70+
print(tool_calls)
71+
72+
73+
print(run_conversation())

src/langtrace_python_sdk/instrumentation/anthropic/patch.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,9 @@ def traced_method(wrapped, instance, args, kwargs):
4848
prompts = kwargs.get("messages", [])
4949
system = kwargs.get("system")
5050
if system:
51-
prompts = [{"role": "system", "content": system}] + kwargs.get("messages", [])
51+
prompts = [{"role": "system", "content": system}] + kwargs.get(
52+
"messages", []
53+
)
5254

5355
span_attributes = {
5456
**get_langtrace_attributes(version, service_provider),

src/langtrace_python_sdk/instrumentation/dspy/patch.py

Lines changed: 21 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -61,8 +61,14 @@ def traced_method(wrapped, instance, args, kwargs):
6161
if config and len(config) > 0:
6262
span_attributes["dspy.optimizer.config"] = json.dumps(config)
6363

64+
# passed operation name
65+
opname = operation_name
66+
if extra_attributes is not None and "langtrace.span.name" in extra_attributes:
67+
# append the operation name to the span name
68+
opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}"
69+
6470
attributes = FrameworkSpanAttributes(**span_attributes)
65-
with tracer.start_as_current_span(operation_name, kind=SpanKind.CLIENT) as span:
71+
with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span:
6672
_set_input_attributes(span, kwargs, attributes)
6773

6874
try:
@@ -100,6 +106,12 @@ def traced_method(wrapped, instance, args, kwargs):
100106
**(extra_attributes if extra_attributes is not None else {}),
101107
}
102108

109+
# passed operation name
110+
opname = operation_name
111+
if extra_attributes is not None and "langtrace.span.name" in extra_attributes:
112+
# append the operation name to the span name
113+
opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}"
114+
103115
if instance.__class__.__name__:
104116
span_attributes["dspy.signature.name"] = instance.__class__.__name__
105117
span_attributes["dspy.signature"] = str(instance)
@@ -108,7 +120,7 @@ def traced_method(wrapped, instance, args, kwargs):
108120
span_attributes["dspy.signature.args"] = str(kwargs)
109121

110122
attributes = FrameworkSpanAttributes(**span_attributes)
111-
with tracer.start_as_current_span(operation_name, kind=SpanKind.CLIENT) as span:
123+
with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span:
112124
_set_input_attributes(span, kwargs, attributes)
113125

114126
try:
@@ -147,6 +159,12 @@ def traced_method(wrapped, instance, args, kwargs):
147159
**(extra_attributes if extra_attributes is not None else {}),
148160
}
149161

162+
# passed operation name
163+
opname = operation_name
164+
if extra_attributes is not None and "langtrace.span.name" in extra_attributes:
165+
# append the operation name to the span name
166+
opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}"
167+
150168
if hasattr(instance, "devset"):
151169
span_attributes["dspy.evaluate.devset"] = str(getattr(instance, "devset"))
152170
if hasattr(instance, "trainset"):
@@ -175,7 +193,7 @@ def traced_method(wrapped, instance, args, kwargs):
175193
span_attributes["dspy.evaluate.args"] = str(args)
176194

177195
attributes = FrameworkSpanAttributes(**span_attributes)
178-
with tracer.start_as_current_span(operation_name, kind=SpanKind.CLIENT) as span:
196+
with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span:
179197
_set_input_attributes(span, kwargs, attributes)
180198

181199
try:

src/langtrace_python_sdk/instrumentation/gemini/patch.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,10 @@ def get_llm_model(instance):
110110

111111
def serialize_prompts(args, kwargs, instance):
112112
prompts = []
113-
if hasattr(instance, "_system_instruction") and instance._system_instruction is not None:
113+
if (
114+
hasattr(instance, "_system_instruction")
115+
and instance._system_instruction is not None
116+
):
114117
system_prompt = {
115118
"role": "system",
116119
"content": instance._system_instruction.__dict__["_pb"].parts[0].text,

src/langtrace_python_sdk/utils/llm.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None):
123123
SpanAttributes.LLM_FREQUENCY_PENALTY: kwargs.get("frequency_penalty"),
124124
SpanAttributes.LLM_REQUEST_SEED: kwargs.get("seed"),
125125
SpanAttributes.LLM_TOOLS: json.dumps(tools) if tools else None,
126+
SpanAttributes.LLM_TOOL_CHOICE: kwargs.get("tool_choice"),
126127
SpanAttributes.LLM_REQUEST_LOGPROPS: kwargs.get("logprobs"),
127128
SpanAttributes.LLM_REQUEST_LOGITBIAS: kwargs.get("logit_bias"),
128129
SpanAttributes.LLM_REQUEST_TOP_LOGPROPS: kwargs.get("top_logprobs"),
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "2.2.2"
1+
__version__ = "2.2.3"

0 commit comments

Comments (0)