
Commit 61e62f9

Merge pull request #240 from Scale3-Labs/ali/quick-fixes
Hotfixes for prompt handling
2 parents f74ab29 + 980bb85 commit 61e62f9

File tree: 5 files changed (+22 lines added, -38 lines removed)


src/examples/openai_example/embeddings_create.py

Lines changed: 1 addition & 0 deletions
@@ -16,5 +16,6 @@ def embeddings_create():
     result = client.embeddings.create(
         model="text-embedding-ada-002",
         input="Once upon a time, there was a pirate.",
+        encoding_format="float",
     )
     return result
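For reference, a minimal way to exercise the updated example (assuming OPENAI_API_KEY is set in the environment); the explicit encoding_format mirrors the line added above, and "float" asks for plain float vectors rather than base64-encoded ones:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

result = client.embeddings.create(
    model="text-embedding-ada-002",
    input="Once upon a time, there was a pirate.",
    encoding_format="float",  # raw floats instead of base64
)
print(len(result.data[0].embedding))  # 1536 dimensions for text-embedding-ada-002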

src/examples/openai_example/images_edit.py

Lines changed: 2 additions & 2 deletions
@@ -23,8 +23,8 @@ def image_edit():

     response = client.images.edit(
         model="dall-e-2",
-        image=open("./resources/lounge_flamingo.png", "rb"),
-        mask=open("./resources/mask.png", "rb"),
+        image=open("src/examples/openai_example/resources/lounge_flamingo.png", "rb"),
+        mask=open("src/examples/openai_example/resources/mask.png", "rb"),
         prompt="A sunlit indoor lounge area with a pool and duck standing in side with flamingo.",
         n=1,
         size="1024x1024",

src/langtrace_python_sdk/instrumentation/ollama/patch.py

Lines changed: 5 additions & 20 deletions
@@ -1,6 +1,4 @@
 from langtrace_python_sdk.constants.instrumentation.ollama import APIS
-from importlib_metadata import version as v
-from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
 from langtrace_python_sdk.utils import set_span_attribute
 from langtrace_python_sdk.utils.llm import (
     get_extra_attributes,
@@ -10,11 +8,7 @@
     set_event_completion,
 )
 from langtrace_python_sdk.utils.silently_fail import silently_fail
-from langtrace_python_sdk.constants.instrumentation.common import (
-    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
-    SERVICE_PROVIDERS,
-)
-from opentelemetry import baggage
+from langtrace_python_sdk.constants.instrumentation.common import SERVICE_PROVIDERS
 from langtrace.trace_attributes import LLMSpanAttributes, Event
 from opentelemetry.trace import SpanKind
 import json
@@ -28,7 +22,10 @@ def traced_method(wrapped, instance, args, kwargs):
         service_provider = SERVICE_PROVIDERS["OLLAMA"]
         span_attributes = {
             **get_langtrace_attributes(version, service_provider),
-            **get_llm_request_attributes(kwargs),
+            **get_llm_request_attributes(
+                kwargs,
+                prompts=kwargs.get("messages", None),
+            ),
             **get_llm_url(instance),
             SpanAttributes.LLM_PATH: api["ENDPOINT"],
             SpanAttributes.LLM_RESPONSE_FORMAT: kwargs.get("format"),
@@ -146,18 +143,6 @@ def _set_input_attributes(span, kwargs, attributes):
     for field, value in attributes.model_dump(by_alias=True).items():
         set_span_attribute(span, field, value)

-    if "messages" in kwargs:
-        set_span_attribute(
-            span,
-            SpanAttributes.LLM_PROMPTS,
-            json.dumps(kwargs.get("messages", [])),
-        )
-    if "prompt" in kwargs:
-        set_span_attribute(
-            span,
-            SpanAttributes.LLM_PROMPTS,
-            json.dumps([{"role": "user", "content": kwargs.get("prompt", "")}]),
-        )
     if "options" in kwargs:
         set_span_attribute(
             span,
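The net effect is that prompt serialization now happens in one place: chat-style calls hand their messages to get_llm_request_attributes via the new prompts argument, and the duplicated LLM_PROMPTS handling in _set_input_attributes is removed. A rough sketch of the two call shapes (the model name and messages are illustrative, and langtrace_python_sdk must be installed):

from langtrace_python_sdk.utils.llm import get_llm_request_attributes

# Chat-style call: messages are passed through explicitly, so LLM_PROMPTS is
# built by the shared helper instead of inside _set_input_attributes.
chat_kwargs = {
    "model": "llama3",  # illustrative model name
    "messages": [{"role": "user", "content": "Why is the sky blue?"}],
}
chat_attrs = get_llm_request_attributes(chat_kwargs, prompts=chat_kwargs.get("messages", None))

# Generate-style call: no messages, so the helper's own "prompt" fallback applies.
gen_kwargs = {"model": "llama3", "prompt": "Why is the sky blue?"}
gen_attrs = get_llm_request_attributes(gen_kwargs)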

src/langtrace_python_sdk/instrumentation/openai/patch.py

Lines changed: 6 additions & 11 deletions
@@ -41,6 +41,7 @@
     is_streaming,
     set_event_completion,
     StreamWrapper,
+    set_span_attributes,
 )
 from openai._types import NOT_GIVEN

@@ -67,8 +68,7 @@ def traced_method(wrapped, instance, args, kwargs):
             kind=SpanKind.CLIENT.value,
             context=set_span_in_context(trace.get_current_span()),
         ) as span:
-            for field, value in attributes.model_dump(by_alias=True).items():
-                set_span_attribute(span, field, value)
+            set_span_attributes(span, attributes)
             try:
                 # Attempt to call the original method
                 result = wrapped(*args, **kwargs)
@@ -131,8 +131,7 @@ async def traced_method(wrapped, instance, args, kwargs):
             kind=SpanKind.CLIENT.value,
             context=set_span_in_context(trace.get_current_span()),
         ) as span:
-            for field, value in attributes.model_dump(by_alias=True).items():
-                set_span_attribute(span, field, value)
+            set_span_attributes(span, attributes)
             try:
                 # Attempt to call the original method
                 result = await wrapped(*args, **kwargs)
@@ -197,9 +196,7 @@ def traced_method(wrapped, instance, args, kwargs):
             kind=SpanKind.CLIENT.value,
             context=set_span_in_context(trace.get_current_span()),
         ) as span:
-            for field, value in attributes.model_dump(by_alias=True).items():
-                if value is not None:
-                    span.set_attribute(field, value)
+            set_span_attributes(span, attributes)
             try:
                 # Attempt to call the original method
                 result = wrapped(*args, **kwargs)
@@ -463,8 +460,7 @@ def traced_method(wrapped, instance, args, kwargs):
             context=set_span_in_context(trace.get_current_span()),
         ) as span:

-            for field, value in attributes.model_dump(by_alias=True).items():
-                set_span_attribute(span, field, value)
+            set_span_attributes(span, attributes)
             try:
                 # Attempt to call the original method
                 result = wrapped(*args, **kwargs)
@@ -521,8 +517,7 @@ async def traced_method(wrapped, instance, args, kwargs):
             context=set_span_in_context(trace.get_current_span()),
         ) as span:

-            for field, value in attributes.model_dump(by_alias=True).items():
-                set_span_attribute(span, field, value)
+            set_span_attributes(span, attributes)
             try:
                 # Attempt to call the original method
                 result = await wrapped(*args, **kwargs)
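set_span_attributes is imported from langtrace_python_sdk.utils.llm, but its body is not part of this diff. A minimal stand-in for the repeated loops it replaces would look roughly like the sketch below; the None guard mirrors the check the third hunk previously had, and the SDK's actual helper may differ:

from langtrace_python_sdk.utils import set_span_attribute

def set_span_attributes(span, attributes):
    # Flatten the pydantic attributes model and copy each field onto the span.
    for field, value in attributes.model_dump(by_alias=True).items():
        if value is not None:  # assumption: skip unset fields, as one old loop did
            set_span_attribute(span, field, value)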

src/langtrace_python_sdk/utils/llm.py

Lines changed: 8 additions & 5 deletions
@@ -95,8 +95,11 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None):

     user = kwargs.get("user", None)
     if prompts is None:
-        prompts = [{"role": user, "content": kwargs.get("prompt", [])}]
-
+        prompts = (
+            [{"role": user or "user", "content": kwargs.get("prompt")}]
+            if "prompt" in kwargs
+            else None
+        )
     top_k = (
         kwargs.get("n", None)
         or kwargs.get("k", None)
@@ -105,21 +108,21 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None):
     )

     top_p = kwargs.get("p", None) or kwargs.get("top_p", None)
-
+    tools = kwargs.get("tools", None)
     return {
         SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model"),
         SpanAttributes.LLM_IS_STREAMING: kwargs.get("stream"),
         SpanAttributes.LLM_REQUEST_TEMPERATURE: kwargs.get("temperature"),
         SpanAttributes.LLM_TOP_K: top_k,
-        SpanAttributes.LLM_PROMPTS: json.dumps(prompts),
+        SpanAttributes.LLM_PROMPTS: json.dumps(prompts) if prompts else None,
         SpanAttributes.LLM_USER: user,
         SpanAttributes.LLM_REQUEST_TOP_P: top_p,
         SpanAttributes.LLM_REQUEST_MAX_TOKENS: kwargs.get("max_tokens"),
         SpanAttributes.LLM_SYSTEM_FINGERPRINT: kwargs.get("system_fingerprint"),
         SpanAttributes.LLM_PRESENCE_PENALTY: kwargs.get("presence_penalty"),
         SpanAttributes.LLM_FREQUENCY_PENALTY: kwargs.get("frequency_penalty"),
         SpanAttributes.LLM_REQUEST_SEED: kwargs.get("seed"),
-        SpanAttributes.LLM_TOOLS: json.dumps(kwargs.get("tools")),
+        SpanAttributes.LLM_TOOLS: json.dumps(tools) if tools else None,
         SpanAttributes.LLM_REQUEST_LOGPROPS: kwargs.get("logprobs"),
         SpanAttributes.LLM_REQUEST_LOGITBIAS: kwargs.get("logit_bias"),
         SpanAttributes.LLM_REQUEST_TOP_LOGPROPS: kwargs.get("top_logprobs"),
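The practical difference: previously a call with no prompt still produced a prompts payload with a null role and empty content, and absent tools serialized as the string "null"; now both attributes stay unset, and a bare prompt string defaults to role "user". A small pure-Python illustration of the new branch (the kwargs are illustrative):

import json

kwargs = {"model": "gpt-3.5-turbo", "prompt": "Once upon a time"}  # illustrative call
user = kwargs.get("user", None)

# Only build a prompts list when a prompt was actually given, defaulting the
# role to "user" when no user was supplied.
prompts = (
    [{"role": user or "user", "content": kwargs.get("prompt")}]
    if "prompt" in kwargs
    else None
)
tools = kwargs.get("tools", None)

print(json.dumps(prompts) if prompts else None)  # '[{"role": "user", "content": "Once upon a time"}]'
print(json.dumps(tools) if tools else None)      # None (previously json.dumps(None) == 'null')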
