Skip to content

Commit c7fe290

Browse files
Release 2.2.7 (#260)
* Add OTLP example (#259) * Adhere to otel span attributes (#257) * adhere to otel's semconv * lock attributes version * lock to 7.0.0 trace attributes * bump version --------- Co-authored-by: Karthik Kalyanaraman <[email protected]> --------- Co-authored-by: Ali Waleed <[email protected]>
1 parent 9e6077b commit c7fe290

File tree

7 files changed

+70
-44
lines changed

7 files changed

+70
-44
lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ classifiers=[
1818
"Operating System :: OS Independent",
1919
]
2020
dependencies = [
21-
'trace-attributes>=6.0.3,<7.0.0',
21+
'trace-attributes==7.0.0',
2222
'opentelemetry-api>=1.25.0',
2323
'opentelemetry-sdk>=1.25.0',
2424
'opentelemetry-instrumentation>=0.46b0',

src/examples/ollama_example/basic_example_2.py

Lines changed: 0 additions & 34 deletions
This file was deleted.
Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
# Example: export Langtrace spans to an OpenTelemetry Collector over OTLP/HTTP.
#
# Setup:
# 1. Save the collector configuration below as otel-config.yaml:
#      receivers:
#        otlp:
#          protocols:
#            grpc:
#              endpoint: "0.0.0.0:4317"
#            http:
#              endpoint: "0.0.0.0:4318"
#      exporters:
#        logging:
#          loglevel: debug
#      service:
#        pipelines:
#          traces:
#            receivers: [otlp]
#            exporters: [logging]
# 2. Start the collector:
#      docker pull otel/opentelemetry-collector:latest
#      docker run --rm -p 4317:4317 -p 4318:4318 -v $(pwd)/otel-config.yaml:/otel-config.yaml otel/opentelemetry-collector --config otel-config.yaml
# 3. Run this script.

from langtrace_python_sdk import langtrace
from openai import OpenAI
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter


# Point the OTLP exporter at the collector's HTTP traces endpoint and hand it
# to langtrace as a custom remote exporter (batching disabled for the demo).
otlp_endpoint = "http://localhost:4318/v1/traces"
otlp_exporter = OTLPSpanExporter(
    endpoint=otlp_endpoint,
    headers=(("Content-Type", "application/json"),),
)
langtrace.init(custom_remote_exporter=otlp_exporter, batch=False)


def chat_with_openai():
    """Send one chat message to OpenAI and print the model's reply."""
    response = OpenAI().chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": "Hello, I'm a human.",
            },
        ],
        stream=False,
        model="gpt-3.5-turbo",
    )
    print(response.choices[0].message.content)


def main():
    """Entry point: run the example chat once."""
    chat_with_openai()


if __name__ == "__main__":
    main()

src/langtrace_python_sdk/instrumentation/cohere/patch.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ def traced_method(wrapped, instance, args, kwargs):
4444

4545
span_attributes = {
4646
**get_langtrace_attributes(version, service_provider),
47-
**get_llm_request_attributes(kwargs),
47+
**get_llm_request_attributes(kwargs, operation_name="rerank"),
4848
**get_llm_url(instance),
4949
SpanAttributes.LLM_REQUEST_MODEL: kwargs.get("model") or "command-r-plus",
5050
SpanAttributes.LLM_URL: APIS["RERANK"]["URL"],
@@ -121,7 +121,7 @@ def traced_method(wrapped, instance, args, kwargs):
121121

122122
span_attributes = {
123123
**get_langtrace_attributes(version, service_provider),
124-
**get_llm_request_attributes(kwargs),
124+
**get_llm_request_attributes(kwargs, operation_name="embed"),
125125
**get_llm_url(instance),
126126
SpanAttributes.LLM_URL: APIS["EMBED"]["URL"],
127127
SpanAttributes.LLM_PATH: APIS["EMBED"]["ENDPOINT"],

src/langtrace_python_sdk/instrumentation/openai/patch.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ def traced_method(wrapped, instance, args, kwargs):
5555
service_provider = SERVICE_PROVIDERS["OPENAI"]
5656
span_attributes = {
5757
**get_langtrace_attributes(version, service_provider, vendor_type="llm"),
58-
**get_llm_request_attributes(kwargs),
58+
**get_llm_request_attributes(kwargs, operation_name="images_generate"),
5959
**get_llm_url(instance),
6060
SpanAttributes.LLM_PATH: APIS["IMAGES_GENERATION"]["ENDPOINT"],
6161
**get_extra_attributes(),
@@ -118,7 +118,7 @@ async def traced_method(wrapped, instance, args, kwargs):
118118

119119
span_attributes = {
120120
**get_langtrace_attributes(version, service_provider, vendor_type="llm"),
121-
**get_llm_request_attributes(kwargs),
121+
**get_llm_request_attributes(kwargs, operation_name="images_generate"),
122122
**get_llm_url(instance),
123123
SpanAttributes.LLM_PATH: APIS["IMAGES_GENERATION"]["ENDPOINT"],
124124
**get_extra_attributes(),
@@ -181,7 +181,7 @@ def traced_method(wrapped, instance, args, kwargs):
181181

182182
span_attributes = {
183183
**get_langtrace_attributes(version, service_provider, vendor_type="llm"),
184-
**get_llm_request_attributes(kwargs),
184+
**get_llm_request_attributes(kwargs, operation_name="images_edit"),
185185
**get_llm_url(instance),
186186
SpanAttributes.LLM_PATH: APIS["IMAGES_EDIT"]["ENDPOINT"],
187187
SpanAttributes.LLM_RESPONSE_FORMAT: kwargs.get("response_format"),
@@ -432,7 +432,7 @@ def traced_method(wrapped, instance, args, kwargs):
432432

433433
span_attributes = {
434434
**get_langtrace_attributes(version, service_provider, vendor_type="llm"),
435-
**get_llm_request_attributes(kwargs),
435+
**get_llm_request_attributes(kwargs, operation_name="embed"),
436436
**get_llm_url(instance),
437437
SpanAttributes.LLM_PATH: APIS["EMBEDDINGS_CREATE"]["ENDPOINT"],
438438
SpanAttributes.LLM_REQUEST_DIMENSIONS: kwargs.get("dimensions"),
@@ -490,7 +490,7 @@ async def traced_method(wrapped, instance, args, kwargs):
490490

491491
span_attributes = {
492492
**get_langtrace_attributes(version, service_provider, vendor_type="llm"),
493-
**get_llm_request_attributes(kwargs),
493+
**get_llm_request_attributes(kwargs, operation_name="embed"),
494494
SpanAttributes.LLM_PATH: APIS["EMBEDDINGS_CREATE"]["ENDPOINT"],
495495
SpanAttributes.LLM_REQUEST_DIMENSIONS: kwargs.get("dimensions"),
496496
**get_extra_attributes(),

src/langtrace_python_sdk/utils/llm.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ def get_langtrace_attributes(version, service_provider, vendor_type="llm"):
9292
}
9393

9494

95-
def get_llm_request_attributes(kwargs, prompts=None, model=None):
95+
def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name="chat"):
9696

9797
user = kwargs.get("user", None)
9898
if prompts is None:
@@ -111,6 +111,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None):
111111
top_p = kwargs.get("p", None) or kwargs.get("top_p", None)
112112
tools = kwargs.get("tools", None)
113113
return {
114+
SpanAttributes.LLM_OPERATION_NAME: operation_name,
114115
SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model"),
115116
SpanAttributes.LLM_IS_STREAMING: kwargs.get("stream"),
116117
SpanAttributes.LLM_REQUEST_TEMPERATURE: kwargs.get("temperature"),
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "2.2.6"
1+
__version__ = "2.2.7"

0 commit comments

Comments
 (0)