
Commit 82a4ee3

Merge pull request #356 from Scale3-Labs/ali/dspy-enhance
Enhance DSPY traces
2 parents: 5896458 + 91c14c7

10 files changed: +193 -61 lines changed

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
import dspy
from langtrace_python_sdk import langtrace
from dotenv import load_dotenv

load_dotenv()

langtrace.init(disable_instrumentations={"all_except": ["dspy", "anthropic"]})

# configure the language model to be used by dspy

llm = dspy.Claude()
dspy.settings.configure(lm=llm)

# create a prompt format that says that the llm will take a question and give back an answer
predict = dspy.Predict("question -> answer")
prediction = predict(
    question="who scored the final goal in football world cup finals in 2014?"
)

print(prediction.answer)
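
Note: the examples in this commit read credentials through load_dotenv(); a minimal pre-flight check is sketched below. The variable names are the conventional ones for the Anthropic SDK and the Langtrace exporter and are an assumption here, since the repository's .env file is not part of this diff.

# Hypothetical pre-flight check for the DSPy examples above.
# ANTHROPIC_API_KEY / LANGTRACE_API_KEY are assumed names, not taken from this diff.
import os

for var in ("ANTHROPIC_API_KEY", "LANGTRACE_API_KEY"):
    if not os.environ.get(var):
        raise RuntimeError(f"{var} is not set; add it to your .env file")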
Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
import dspy
from langtrace_python_sdk import langtrace
from dotenv import load_dotenv

load_dotenv()

langtrace.init(disable_instrumentations={"all_except": ["dspy", "anthropic"]})

# configure the language model to be used by dspy
llm = dspy.Claude()
dspy.settings.configure(lm=llm)


# create a signature for basic question answering
class BasicQA(dspy.Signature):
    """Given a question, generate the answer."""

    question = dspy.InputField(desc="User's question")
    answer = dspy.OutputField(desc="often between 1 and 5 words")


# create a prompt format that says that the llm will take a question and give back an answer
predict = dspy.ChainOfThought(BasicQA)
prediction = predict(
    question="Who provided the assist for the final goal in the 2014 FIFA World Cup final?"
)

print(prediction.answer)
Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
import dspy
from langtrace_python_sdk import langtrace
from dotenv import load_dotenv

load_dotenv()

langtrace.init(disable_instrumentations={"all_except": ["dspy", "anthropic"]})

# configure the language model to be used by dspy
llm = dspy.Claude()
dspy.settings.configure(lm=llm)


# create a signature for basic question answering
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""

    question = dspy.InputField(
        desc="A question that can be answered with a short factoid answer"
    )
    answer = dspy.OutputField(desc="often between 1 and 5 words")


# create a prompt format that says that the llm will take a question and give back an answer
predict = dspy.Predict(BasicQA)
prediction = predict(
    question="Sarah has 5 apples. She buys 7 more apples from the store. How many apples does Sarah have now?"
)

print(prediction.answer)
Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
import dspy
from langtrace_python_sdk import langtrace, with_langtrace_root_span
from dotenv import load_dotenv

load_dotenv()

langtrace.init(disable_instrumentations={"all_except": ["dspy", "anthropic"]})

# configure the language model to be used by dspy
llm = dspy.Claude()
dspy.settings.configure(lm=llm)


# create a signature for basic question answering
class BasicQA(dspy.Signature):
    """Given a question, generate the answer."""

    question = dspy.InputField(desc="User's question")
    answer = dspy.OutputField(desc="often between 1 and 5 words")


class DoubleChainOfThought(dspy.Module):
    def __init__(self):
        self.cot1 = dspy.ChainOfThought("question -> step_by_step_thought")
        self.cot2 = dspy.ChainOfThought("question, thought -> one_word_answer")

    def forward(self, question):
        thought = self.cot1(question=question).step_by_step_thought
        answer = self.cot2(question=question, thought=thought).one_word_answer
        return dspy.Prediction(thought=thought, answer=answer)


@with_langtrace_root_span(name="Double Chain Of thought")
def main():
    multi_step_question = "what is the capital of the birth state of the person who provided the assist for the Mario Gotze's in football world cup in 2014?"
    double_cot = DoubleChainOfThought()
    result = double_cot(question=multi_step_question)
    print(result)


main()

src/langtrace_python_sdk/extensions/langtrace_exporter.py

Lines changed: 1 addition & 1 deletion
@@ -124,7 +124,7 @@ def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
            url=f"{self.api_host}",
            data=json.dumps(data),
            headers=headers,
-            timeout=20,
+            timeout=40,
        )

        if not response.ok:

src/langtrace_python_sdk/instrumentation/anthropic/patch.py

Lines changed: 5 additions & 10 deletions
@@ -14,10 +14,8 @@
limitations under the License.
"""

-from typing import Any, Callable, Dict, List, Optional, Iterator, TypedDict, Union
-from langtrace.trace_attributes import Event, SpanAttributes, LLMSpanAttributes
-from langtrace_python_sdk.utils import set_span_attribute
-from langtrace_python_sdk.utils.silently_fail import silently_fail
+from typing import Any, Callable, List, Iterator, Union
+from langtrace.trace_attributes import SpanAttributes, LLMSpanAttributes
import json

from langtrace_python_sdk.utils.llm import (
@@ -28,6 +26,7 @@
    get_llm_url,
    get_span_name,
    set_event_completion,
+    set_span_attributes,
    set_usage_attributes,
    set_span_attribute,
)
@@ -39,8 +38,6 @@
    StreamingResult,
    ResultType,
    MessagesCreateKwargs,
-    ContentItem,
-    Usage,
)

@@ -62,22 +59,20 @@ def traced_method(
        prompts = [{"role": "system", "content": system}] + kwargs.get(
            "messages", []
        )
-        extraAttributes = get_extra_attributes()
        span_attributes = {
            **get_langtrace_attributes(version, service_provider),
            **get_llm_request_attributes(kwargs, prompts=prompts),
            **get_llm_url(instance),
            SpanAttributes.LLM_PATH: APIS["MESSAGES_CREATE"]["ENDPOINT"],
-            **extraAttributes,  # type: ignore
+            **get_extra_attributes(),
        }

        attributes = LLMSpanAttributes(**span_attributes)

        span = tracer.start_span(
            name=get_span_name(APIS["MESSAGES_CREATE"]["METHOD"]), kind=SpanKind.CLIENT
        )
-        for field, value in attributes.model_dump(by_alias=True).items():
-            set_span_attribute(span, field, value)
+        set_span_attributes(span, attributes)
        try:
            # Attempt to call the original method
            result = wrapped(*args, **kwargs)
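
Note: the hunk above swaps the inline model_dump loop for the shared set_span_attributes helper imported from langtrace_python_sdk.utils.llm. A minimal sketch of what that helper is assumed to do, inferred from the loop it replaces (the library's actual implementation may differ):

# Sketch only — reconstructed from the removed loop, not the library source.
from langtrace_python_sdk.utils import set_span_attribute


def set_span_attributes(span, attributes) -> None:
    # Dump the pydantic attributes model and copy each field onto the span.
    for field, value in attributes.model_dump(by_alias=True).items():
        set_span_attribute(span, field, value)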

src/langtrace_python_sdk/instrumentation/autogen/instrumentation.py

Lines changed: 0 additions & 6 deletions
@@ -10,15 +10,9 @@ def instrumentation_dependencies(self):
        return ["autogen >= 0.1.0"]

    def _instrument(self, **kwargs):
-        print("Instrumneting autogen")
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, "", tracer_provider)
        version = v("autogen")
-        # conversable_agent.intiate_chat
-        # conversable_agent.register_function
-        # agent.Agent
-        # AgentCreation
-        # Tools --> Register_for_llm, register_for_execution, register_for_function
        try:
            _W(
                module="autogen.agentchat.conversable_agent",

src/langtrace_python_sdk/instrumentation/dspy/patch.py

Lines changed: 63 additions & 40 deletions
@@ -2,6 +2,12 @@
from importlib_metadata import version as v
from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
from langtrace_python_sdk.utils import set_span_attribute
+from langtrace_python_sdk.utils.llm import (
+    get_extra_attributes,
+    get_langtrace_attributes,
+    get_span_name,
+    set_span_attributes,
+)
from langtrace_python_sdk.utils.silently_fail import silently_fail
from langtrace_python_sdk.constants.instrumentation.common import (
    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
@@ -39,25 +45,29 @@ def traced_method(wrapped, instance, args, kwargs):
            ),
        }
        span_attributes["dspy.optimizer.module.prog"] = json.dumps(prog)
-        if hasattr(instance, 'metric'):
-            span_attributes["dspy.optimizer.metric"] = getattr(instance, 'metric').__name__
+        if hasattr(instance, "metric"):
+            span_attributes["dspy.optimizer.metric"] = getattr(
+                instance, "metric"
+            ).__name__
        if kwargs.get("trainset") and len(kwargs.get("trainset")) > 0:
            span_attributes["dspy.optimizer.trainset"] = str(kwargs.get("trainset"))
        config = {}
-        if hasattr(instance, 'metric_threshold'):
-            config["metric_threshold"] = getattr(instance, 'metric_threshold')
-        if hasattr(instance, 'teacher_settings'):
-            config["teacher_settings"] = getattr(instance, 'teacher_settings')
-        if hasattr(instance, 'max_bootstrapped_demos'):
-            config["max_bootstrapped_demos"] = getattr(instance, 'max_bootstrapped_demos')
-        if hasattr(instance, 'max_labeled_demos'):
-            config["max_labeled_demos"] = getattr(instance, 'max_labeled_demos')
-        if hasattr(instance, 'max_rounds'):
-            config["max_rounds"] = getattr(instance, 'max_rounds')
-        if hasattr(instance, 'max_steps'):
-            config["max_errors"] = getattr(instance, 'max_errors')
-        if hasattr(instance, 'error_count'):
-            config["error_count"] = getattr(instance, 'error_count')
+        if hasattr(instance, "metric_threshold"):
+            config["metric_threshold"] = getattr(instance, "metric_threshold")
+        if hasattr(instance, "teacher_settings"):
+            config["teacher_settings"] = getattr(instance, "teacher_settings")
+        if hasattr(instance, "max_bootstrapped_demos"):
+            config["max_bootstrapped_demos"] = getattr(
+                instance, "max_bootstrapped_demos"
+            )
+        if hasattr(instance, "max_labeled_demos"):
+            config["max_labeled_demos"] = getattr(instance, "max_labeled_demos")
+        if hasattr(instance, "max_rounds"):
+            config["max_rounds"] = getattr(instance, "max_rounds")
+        if hasattr(instance, "max_steps"):
+            config["max_errors"] = getattr(instance, "max_errors")
+        if hasattr(instance, "error_count"):
+            config["error_count"] = getattr(instance, "error_count")
        if config and len(config) > 0:
            span_attributes["dspy.optimizer.config"] = json.dumps(config)

@@ -96,37 +106,36 @@ def patch_signature(operation_name, version, tracer):
    def traced_method(wrapped, instance, args, kwargs):

        service_provider = SERVICE_PROVIDERS["DSPY"]
-        extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
        span_attributes = {
-            "langtrace.sdk.name": "langtrace-python-sdk",
-            "langtrace.service.name": service_provider,
-            "langtrace.service.type": "framework",
-            "langtrace.service.version": version,
-            "langtrace.version": v(LANGTRACE_SDK_NAME),
-            **(extra_attributes if extra_attributes is not None else {}),
+            **get_langtrace_attributes(
+                service_provider=service_provider,
+                version=version,
+                vendor_type="framework",
+            ),
+            **get_extra_attributes(),
        }

-        # passed operation name
-        opname = operation_name
-        if extra_attributes is not None and "langtrace.span.name" in extra_attributes:
-            # append the operation name to the span name
-            opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}"
-
        if instance.__class__.__name__:
            span_attributes["dspy.signature.name"] = instance.__class__.__name__
-            span_attributes["dspy.signature"] = str(instance)
+            span_attributes["dspy.signature"] = str(instance.signature)

        if kwargs and len(kwargs) > 0:
            span_attributes["dspy.signature.args"] = str(kwargs)

        attributes = FrameworkSpanAttributes(**span_attributes)
-        with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span:
-            _set_input_attributes(span, kwargs, attributes)
+        with tracer.start_as_current_span(
+            get_span_name(operation_name=operation_name), kind=SpanKind.CLIENT
+        ) as span:
+            set_span_attributes(span, attributes)

            try:
                result = wrapped(*args, **kwargs)
                if result:
-                    set_span_attribute(span, "dspy.signature.result", str(result))
+                    set_span_attribute(
+                        span,
+                        "dspy.signature.result",
+                        json.dumps(result.toDict()),
+                    )
                span.set_status(Status(StatusCode.OK))

                span.end()
@@ -168,27 +177,41 @@ def traced_method(wrapped, instance, args, kwargs):
        if hasattr(instance, "devset"):
            span_attributes["dspy.evaluate.devset"] = str(getattr(instance, "devset"))
        if hasattr(instance, "trainset"):
-            span_attributes["dspy.evaluate.display"] = str(getattr(instance, "trainset"))
+            span_attributes["dspy.evaluate.display"] = str(
+                getattr(instance, "trainset")
+            )
        if hasattr(instance, "num_threads"):
-            span_attributes["dspy.evaluate.num_threads"] = str(getattr(instance, "num_threads"))
+            span_attributes["dspy.evaluate.num_threads"] = str(
+                getattr(instance, "num_threads")
+            )
        if hasattr(instance, "return_outputs"):
            span_attributes["dspy.evaluate.return_outputs"] = str(
                getattr(instance, "return_outputs")
            )
        if hasattr(instance, "display_table"):
-            span_attributes["dspy.evaluate.display_table"] = str(getattr(instance, "display_table"))
+            span_attributes["dspy.evaluate.display_table"] = str(
+                getattr(instance, "display_table")
+            )
        if hasattr(instance, "display_progress"):
            span_attributes["dspy.evaluate.display_progress"] = str(
                getattr(instance, "display_progress")
            )
        if hasattr(instance, "metric"):
-            span_attributes["dspy.evaluate.metric"] = getattr(instance, "metric").__name__
+            span_attributes["dspy.evaluate.metric"] = getattr(
+                instance, "metric"
+            ).__name__
        if hasattr(instance, "error_count"):
-            span_attributes["dspy.evaluate.error_count"] = str(getattr(instance, "error_count"))
+            span_attributes["dspy.evaluate.error_count"] = str(
+                getattr(instance, "error_count")
+            )
        if hasattr(instance, "error_lock"):
-            span_attributes["dspy.evaluate.error_lock"] = str(getattr(instance, "error_lock"))
+            span_attributes["dspy.evaluate.error_lock"] = str(
+                getattr(instance, "error_lock")
+            )
        if hasattr(instance, "max_errors"):
-            span_attributes["dspy.evaluate.max_errors"] = str(getattr(instance, "max_errors"))
+            span_attributes["dspy.evaluate.max_errors"] = str(
+                getattr(instance, "max_errors")
+            )
        if args and len(args) > 0:
            span_attributes["dspy.evaluate.args"] = str(args)
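
Note: patch_signature above now builds its base attributes with get_langtrace_attributes and get_extra_attributes rather than hand-written keys. A rough sketch of what those helpers are assumed to return, reconstructed from the literal dict and baggage lookup they replace (the real definitions live in langtrace_python_sdk/utils/llm.py and may differ):

# Sketch only — inferred from the removed code in this diff, not the library source.
from importlib_metadata import version as v
from opentelemetry import baggage

from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
from langtrace_python_sdk.constants.instrumentation.common import (
    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
)


def get_langtrace_attributes(version, service_provider, vendor_type="llm"):
    # The five langtrace.* keys that patch_signature previously set by hand;
    # the "llm" default for vendor_type is an assumption.
    return {
        "langtrace.sdk.name": "langtrace-python-sdk",
        "langtrace.service.name": service_provider,
        "langtrace.service.type": vendor_type,
        "langtrace.service.version": version,
        "langtrace.version": v(LANGTRACE_SDK_NAME),
    }


def get_extra_attributes():
    # Baggage-propagated attributes, previously read inline via baggage.get_baggage(...).
    extra = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
    return extra if extra is not None else {}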

src/langtrace_python_sdk/utils/llm.py

Lines changed: 4 additions & 3 deletions
@@ -18,15 +18,14 @@
from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
from langtrace_python_sdk.utils import set_span_attribute
from langtrace_python_sdk.types import NOT_GIVEN
-from tiktoken import get_encoding
from tiktoken import get_encoding, list_encoding_names

from langtrace_python_sdk.constants.instrumentation.common import (
    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
    TIKTOKEN_MODEL_MAPPING,
)
from langtrace_python_sdk.constants.instrumentation.openai import OPENAI_COST_TABLE
-from langtrace.trace_attributes import SpanAttributes, Event
+from langtrace.trace_attributes import SpanAttributes
from importlib_metadata import version as v
import json
from opentelemetry import baggage
@@ -142,7 +141,9 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name=
        SpanAttributes.LLM_FREQUENCY_PENALTY: kwargs.get("frequency_penalty"),
        SpanAttributes.LLM_REQUEST_SEED: kwargs.get("seed"),
        SpanAttributes.LLM_TOOLS: json.dumps(tools) if tools else None,
-        SpanAttributes.LLM_TOOL_CHOICE: json.dumps(tool_choice) if tool_choice else None,
+        SpanAttributes.LLM_TOOL_CHOICE: (
+            json.dumps(tool_choice) if tool_choice else None
+        ),
        SpanAttributes.LLM_REQUEST_LOGPROPS: kwargs.get("logprobs"),
        SpanAttributes.LLM_REQUEST_LOGITBIAS: kwargs.get("logit_bias"),
        SpanAttributes.LLM_REQUEST_TOP_LOGPROPS: kwargs.get("top_logprobs"),
Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-__version__ = "2.3.18"
+__version__ = "2.3.19"
