Skip to content

Commit 143b7bd

Browse files
committed
clean up
1 parent 4aa92c9 commit 143b7bd

File tree

8 files changed

+57
-268
lines changed

8 files changed

+57
-268
lines changed

src/examples/crewai_example/trip_planner/agents.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,9 +12,9 @@ class TravelAgents:
1212
def __init__(self):
1313
self.OpenAIGPT35 = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.7)
1414
self.OpenAIGPT4 = ChatOpenAI(model_name="gpt-4", temperature=0.7)
15-
self.Ollama = ChatOllama(model="openhermes")
15+
self.Ollama = ChatOllama(model="llama3")
1616
self.Cohere = ChatCohere(model="command-r")
17-
self.Anthropic = ChatAnthropic(model="claude-3-5-sonnet")
17+
self.Anthropic = ChatAnthropic(model="claude-3-5-sonnet-20240620")
1818

1919
def expert_travel_agent(self):
2020
return Agent(
@@ -28,7 +28,7 @@ def expert_travel_agent(self):
2828
# tools=[tool_1, tool_2],
2929
allow_delegation=False,
3030
verbose=True,
31-
llm=self.OpenAIGPT4,
31+
llm=self.Cohere,
3232
)
3333

3434
def city_selection_expert(self):
@@ -39,7 +39,7 @@ def city_selection_expert(self):
3939
# tools=[tool_1, tool_2],
4040
allow_delegation=False,
4141
verbose=True,
42-
llm=self.OpenAIGPT4,
42+
llm=self.Cohere,
4343
)
4444

4545
def local_tour_guide(self):
@@ -50,5 +50,5 @@ def local_tour_guide(self):
5050
# tools=[tool_1, tool_2],
5151
allow_delegation=False,
5252
verbose=True,
53-
llm=self.OpenAIGPT4,
53+
llm=self.Cohere,
5454
)

src/examples/litellm_example/basic.py

Lines changed: 48 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -2,63 +2,73 @@
22
from dotenv import load_dotenv
33
from litellm import completion, acompletion
44
import litellm
5+
import asyncio
56

6-
litellm.set_verbose = False
77
load_dotenv()
8-
langtrace.init(write_spans_to_console=True)
8+
9+
10+
litellm.success_callback = ["langtrace"]
11+
langtrace.init()
12+
litellm.set_verbose = False
913

1014

1115
@with_langtrace_root_span("Litellm Example OpenAI")
1216
def openAI():
1317
response = completion(
1418
model="gpt-3.5-turbo",
15-
messages=[{"content": "Hello, how are you?", "role": "user"}],
19+
messages=[
20+
{"content": "respond only in Yoda speak.", "role": "system"},
21+
{"content": "Hello, how are you?", "role": "user"},
22+
],
1623
)
1724
return response
1825

1926

20-
@with_langtrace_root_span("Litellm Example Anthropic Completion")
21-
def anthropic():
22-
response = completion(
23-
model="claude-2",
24-
messages=[{"content": "Hello, how are you?", "role": "user"}],
25-
temperature=0.5,
26-
top_p=0.5,
27-
n=1,
28-
)
29-
print(response)
30-
return response
27+
# @with_langtrace_root_span("Litellm Example Anthropic Completion")
28+
def anthropic(streaming=False):
29+
try:
3130

31+
response = completion(
32+
model="claude-2",
33+
messages=[
34+
{"content": "respond only in Yoda speak.", "role": "system"},
35+
{"content": "what is 2 + 2?", "role": "user"},
36+
],
37+
temperature=0.5,
38+
top_p=0.5,
39+
n=1,
40+
stream=streaming,
41+
stream_options={"include_usage": True},
42+
)
43+
# print(response)
44+
if streaming:
45+
for _ in response:
46+
pass
47+
else:
48+
return response
49+
except Exception as e:
50+
print("ERORRRR", e)
3251

33-
@with_langtrace_root_span("Litellm Example Anthropic Streaming")
34-
def anthropic_streaming():
35-
response = completion(
36-
model="claude-2",
37-
messages=[{"content": "Hello, how are you?", "role": "user"}],
38-
stream=True,
39-
temperature=0.5,
40-
# presence_penalty=0.5,
41-
# frequency_penalty=0.5,
42-
top_p=0.5,
43-
n=1,
44-
# logit_bias={"Hello": 1.0},
45-
# top_logprobs=1,
46-
)
47-
for _ in response:
48-
pass
49-
50-
return response
5152

52-
53-
@with_langtrace_root_span("Litellm Example OpenAI Async Streaming")
54-
async def async_anthropic_streaming():
53+
# @with_langtrace_root_span("Litellm Example OpenAI Async Streaming")
54+
async def async_anthropic(streaming=False):
5555
response = await acompletion(
5656
model="claude-2",
5757
messages=[{"content": "Hello, how are you?", "role": "user"}],
58-
stream=True,
58+
stream=streaming,
59+
stream_options={"include_usage": True},
5960
temperature=0.5,
6061
top_p=0.5,
6162
n=1,
6263
)
63-
async for _ in response:
64-
pass
64+
if streaming:
65+
async for _ in response:
66+
pass
67+
else:
68+
return response
69+
70+
71+
if __name__ == "__main__":
72+
# openAI()
73+
# anthropic(streaming=False)
74+
asyncio.run(async_anthropic(streaming=True))

src/langtrace_python_sdk/constants/instrumentation/common.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,6 @@
2727
"QDRANT": "Qdrant",
2828
"WEAVIATE": "Weaviate",
2929
"OLLAMA": "Ollama",
30-
"LITELLM": "Litellm",
3130
"VERTEXAI": "VertexAI",
3231
"GEMINI": "Gemini",
3332
}

src/langtrace_python_sdk/instrumentation/__init__.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@
1414
from .weaviate import WeaviateInstrumentation
1515
from .ollama import OllamaInstrumentor
1616
from .dspy import DspyInstrumentation
17-
from .litellm import LiteLLMInstrumentation
1817
from .vertexai import VertexAIInstrumentation
1918
from .gemini import GeminiInstrumentation
2019

@@ -35,7 +34,6 @@
3534
"WeaviateInstrumentation",
3635
"OllamaInstrumentor",
3736
"DspyInstrumentation",
38-
"LiteLLMInstrumentation",
3937
"VertexAIInstrumentation",
4038
"GeminiInstrumentation",
4139
]

src/langtrace_python_sdk/instrumentation/litellm/__init__.py

Lines changed: 0 additions & 4 deletions
This file was deleted.

src/langtrace_python_sdk/instrumentation/litellm/instrumentation.py

Lines changed: 0 additions & 66 deletions
This file was deleted.

src/langtrace_python_sdk/instrumentation/litellm/patch.py

Lines changed: 0 additions & 148 deletions
This file was deleted.

0 commit comments

Comments (0)