Skip to content

Commit 6bf8abb

Browse files
committed
migrate to otel exporters + litellm example
1 parent b2e7773 commit 6bf8abb

File tree

4 files changed

+41
-7
lines changed

4 files changed

+41
-7
lines changed

src/examples/litellm_example/basic.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
from langtrace_python_sdk import with_langtrace_root_span, langtrace
21
from dotenv import load_dotenv
32
from litellm import completion, acompletion
43
import litellm
@@ -8,11 +7,9 @@
87

98

109
litellm.success_callback = ["langtrace"]
11-
langtrace.init()
1210
litellm.set_verbose = False
1311

1412

15-
@with_langtrace_root_span("Litellm Example OpenAI")
1613
def openAI(streaming=False):
1714
response = completion(
1815
model="gpt-3.5-turbo",
@@ -56,7 +53,6 @@ def anthropic(streaming=False):
5653
print("ERORRRR", e)
5754

5855

59-
# @with_langtrace_root_span("Litellm Example OpenAI Async Streaming")
6056
async def async_anthropic(streaming=False):
6157
response = await acompletion(
6258
model="claude-2.1",
@@ -93,6 +89,6 @@ def cohere(streaming=False):
9389

9490
if __name__ == "__main__":
9591
# openAI()
96-
anthropic(streaming=False)
92+
# anthropic(streaming=False)
9793
cohere(streaming=True)
9894
# asyncio.run(async_anthropic(streaming=True))
Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
# LiteLLM proxy configuration.
# Routes incoming requests to the deployments below and reports
# successful calls to the Langtrace callback.

model_list:
  - model_name: "gpt-4" # all requests where model not in your config go to this deployment
    litellm_params:
      model: openai/gpt-4 # set `openai/` to use the openai route

litellm_settings:
  # Fire the langtrace success callback for every completed request.
  success_callback: ["langtrace"]

environment_variables:
  # Placeholder value — replace with a real key (or set it in the环境) before use.
  # NOTE(review): presumably read by the langtrace callback; verify against the proxy docs.
  LANGTRACE_API_KEY: "fake-api-key"
Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
"""Minimal example: call a locally running LiteLLM proxy via the OpenAI client.

Assumes the proxy is already serving on http://0.0.0.0:4000, e.g. started
with `litellm --model gpt-4`.
"""
import openai
from dotenv import load_dotenv


def main() -> None:
    """Send one chat-completion request to the local LiteLLM proxy and print it."""
    # Pull API keys (OPENAI_API_KEY, etc.) from a local .env file into os.environ.
    load_dotenv()

    # Point the OpenAI client at the LiteLLM proxy instead of api.openai.com.
    client = openai.OpenAI(base_url="http://0.0.0.0:4000")

    # request sent to model set on litellm proxy, `litellm --model`
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "user", "content": "this is a test request, write a short poem"}
        ],
    )
    print(response)


# Guard the entry point so importing this module performs no network I/O.
if __name__ == "__main__":
    main()

src/langtrace_python_sdk/langtrace.py

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,12 @@
3131
SimpleSpanProcessor,
3232
)
3333

34+
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
35+
OTLPSpanExporter as GRPCExporter,
36+
)
37+
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
38+
OTLPSpanExporter as HTTPExporter,
39+
)
3440
from langtrace_python_sdk.constants.exporter.langtrace_exporter import (
3541
LANGTRACE_REMOTE_URL,
3642
)
@@ -112,8 +118,14 @@ def get_exporter(config: LangtraceConfig, host: str):
112118
if config.custom_remote_exporter:
113119
return config.custom_remote_exporter
114120

115-
return LangTraceExporter(host, config.api_key, config.disable_logging)
116-
121+
headers = {
122+
"x-api-key": config.api_key or os.environ.get("LANGTRACE_API_KEY"),
123+
}
124+
if "http" in host.lower() or "https" in host.lower():
125+
return HTTPExporter(endpoint=host, headers=headers)
126+
else:
127+
return GRPCExporter(endpoint=host, headers=headers)
128+
117129

118130
def add_span_processor(provider: TracerProvider, config: LangtraceConfig, exporter):
119131
if config.write_spans_to_console:

0 commit comments

Comments
 (0)