
Commit 26348d1

devin-ai-integration[bot] and Pratyush Shukla committed
Add comprehensive LiteLLM example scripts
- Add litellm_streaming_example.py: demonstrates streaming responses with time-to-first-token metrics
- Add litellm_async_example.py: showcases async operations and concurrent completions
- Add litellm_multi_provider_example.py: tests multiple LLM providers (OpenAI, Anthropic, etc.)
- Add litellm_advanced_features_example.py: covers function calling and advanced features
- All examples include AgentOps validation and session tracking
- Examples follow established patterns from other provider integrations

Co-Authored-By: Pratyush Shukla <[email protected]>
1 parent 11ec75b commit 26348d1
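Only the advanced-features script appears in the diff below. For orientation, here is a minimal sketch of the concurrent-completion pattern the commit message attributes to litellm_async_example.py; it is illustrative rather than that file's actual contents, and the helper name, trace name, and model string are placeholders:

import asyncio

import agentops
import litellm

agentops.init()
tracer = agentops.start_trace("litellm-async-sketch")  # placeholder trace name


async def ask(question: str) -> str:
    # litellm.acompletion is LiteLLM's async counterpart to litellm.completion
    response = await litellm.acompletion(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": question}],
        max_tokens=50,
    )
    return response.choices[0].message.content


async def main():
    questions = ["What is 2+2?", "Name a color.", "What day comes after Monday?"]
    # Fire all requests concurrently and wait for every answer
    answers = await asyncio.gather(*(ask(q) for q in questions))
    for question, answer in zip(questions, answers):
        print(f"{question} -> {answer}")


if __name__ == "__main__":
    try:
        asyncio.run(main())
        agentops.end_trace(tracer, end_state="Success")
    except Exception:
        agentops.end_trace(tracer, end_state="Fail")
        raise

Because acompletion takes the same model strings and message format as completion, the synchronous examples in the diff below translate to this async form almost mechanically.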

File tree: 4 files changed, +998 -0 lines changed

litellm_advanced_features_example.py: 349 additions & 0 deletions
@@ -0,0 +1,349 @@
"""
LiteLLM Advanced Features Example with AgentOps Integration

This example demonstrates advanced LiteLLM features including:
- Function/tool calling
- Text embeddings
- Error handling and retries
- Streaming with chunk-level metrics
- Batch processing of multiple requests

Install required packages:
    pip install litellm agentops

Set your API keys:
    export OPENAI_API_KEY="your-openai-key"
    export AGENTOPS_API_KEY="your-agentops-key"
"""
import os
import json
import agentops
import litellm

agentops.init()

tracer = agentops.start_trace("litellm-advanced-features-example")

print("🚀 Starting LiteLLM Advanced Features Example with AgentOps")
print("=" * 60)

def function_calling_example():
    """Demonstrate function/tool calling capabilities."""
    print("\n🛠️ Example 1: Function/Tool Calling")
    print("-" * 40)

    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather in a given location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. San Francisco, CA"
                        },
                        "unit": {
                            "type": "string",
                            "enum": ["celsius", "fahrenheit"],
                            "description": "The temperature unit"
                        }
                    },
                    "required": ["location"]
                }
            }
        },
        {
            "type": "function",
            "function": {
                "name": "calculate_distance",
                "description": "Calculate distance between two cities",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "city1": {"type": "string", "description": "First city"},
                        "city2": {"type": "string", "description": "Second city"}
                    },
                    "required": ["city1", "city2"]
                }
            }
        }
    ]

    messages = [
        {"role": "user", "content": "What's the weather like in New York and what's the distance between New York and Los Angeles?"}
    ]

    try:
        print("🔧 Making completion with function calling...")
        response = litellm.completion(
            model="gpt-4o-mini",
            messages=messages,
            tools=tools,
            tool_choice="auto",
            max_tokens=300
        )

        message = response.choices[0].message

        if message.tool_calls:
            print(f"🎯 Function calls detected: {len(message.tool_calls)}")
            for i, tool_call in enumerate(message.tool_calls, 1):
                print(f" {i}. Function: {tool_call.function.name}")
                print(f" Arguments: {tool_call.function.arguments}")
        else:
            print(f"📝 Regular response: {message.content}")

        print(f"✅ Function calling successful! Tokens: {response.usage.total_tokens}")
        return response

    except Exception as e:
        print(f"❌ Function calling failed: {e}")
        raise

def embeddings_example():
    """Demonstrate embeddings generation."""
    print("\n🔢 Example 2: Text Embeddings")
    print("-" * 40)

    texts = [
        "The quick brown fox jumps over the lazy dog",
        "Machine learning is a subset of artificial intelligence",
        "Python is a popular programming language for data science"
    ]

    try:
        print("🎯 Generating embeddings...")

        for i, text in enumerate(texts, 1):
            print(f" {i}. Processing: {text[:50]}...")

            response = litellm.embedding(
                model="text-embedding-ada-002",
                input=text
            )

            embedding = response.data[0].embedding
            print(f" Embedding dimension: {len(embedding)}")
            print(f" First 5 values: {embedding[:5]}")

        print(f"✅ Embeddings generated successfully!")
        return True

    except Exception as e:
        print(f"❌ Embeddings failed: {e}")
        return False

def error_handling_example():
    """Demonstrate error handling and retry mechanisms."""
    print("\n⚠️ Example 3: Error Handling & Retries")
    print("-" * 40)

    print("🎯 Testing error handling with invalid model...")

    try:
        response = litellm.completion(
            model="invalid-model-name",
            messages=[{"role": "user", "content": "Hello"}],
            max_tokens=50
        )
        print("❌ This should not succeed!")

    except Exception as e:
        print(f"✅ Expected error caught: {type(e).__name__}")
        print(f" Error message: {str(e)[:100]}...")

    print("\n🎯 Testing with valid model and proper error handling...")

    try:
        response = litellm.completion(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "Say hello"}],
            max_tokens=10,
            temperature=0.1
        )

        print(f"📝 Response: {response.choices[0].message.content}")
        print(f"✅ Proper request successful!")
        return True

    except Exception as e:
        print(f"❌ Unexpected error: {e}")
        return False

def streaming_with_callbacks_example():
    """Demonstrate streaming with custom callback handling."""
    print("\n📡 Example 4: Streaming with Custom Callbacks")
    print("-" * 40)

    messages = [
        {"role": "user", "content": "Write a short poem about technology and nature."}
    ]

    try:
        print("🎯 Making streaming completion with callback tracking...")

        # Track streaming metrics
        chunk_count = 0
        total_content = ""
        first_chunk_time = None  # index of the first chunk that carried content (not a wall-clock time)

        response = litellm.completion(
            model="gpt-4o-mini",
            messages=messages,
            stream=True,
            max_tokens=200,
            temperature=0.7
        )

        print("📡 Streaming response:")
        for chunk in response:
            chunk_count += 1

            if chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                total_content += content
                print(content, end="", flush=True)

                if first_chunk_time is None:
                    first_chunk_time = chunk_count

        print(f"\n\n📊 Streaming metrics:")
        print(f" • Total chunks: {chunk_count}")
        print(f" • Content length: {len(total_content)} characters")
        print(f" • First content chunk: #{first_chunk_time}")

        print(f"✅ Streaming with callbacks successful!")
        return True

    except Exception as e:
        print(f"❌ Streaming with callbacks failed: {e}")
        return False

def batch_processing_example():
    """Demonstrate batch processing of multiple requests."""
    print("\n📦 Example 5: Batch Processing")
    print("-" * 40)

    tasks = [
        {"role": "user", "content": "What is 2+2?"},
        {"role": "user", "content": "Name a color."},
        {"role": "user", "content": "What day comes after Monday?"},
        {"role": "user", "content": "How many legs does a spider have?"}
    ]

    try:
        print(f"🎯 Processing {len(tasks)} tasks in batch...")

        results = []
        for i, task in enumerate(tasks, 1):
            print(f" Processing task {i}/{len(tasks)}...")

            response = litellm.completion(
                model="gpt-4o-mini",
                messages=[task],
                max_tokens=50,
                temperature=0.1
            )

            content = response.choices[0].message.content
            tokens = response.usage.total_tokens

            results.append({
                "task": task["content"],
                "response": content,
                "tokens": tokens
            })

        print(f"\n📊 Batch results:")
        total_tokens = 0
        for i, result in enumerate(results, 1):
            print(f" {i}. Q: {result['task']}")
            print(f" A: {result['response']}")
            print(f" Tokens: {result['tokens']}")
            total_tokens += result['tokens']

        print(f"\n✅ Batch processing successful! Total tokens: {total_tokens}")
        return results

    except Exception as e:
        print(f"❌ Batch processing failed: {e}")
        return []

def main():
    """Main function to run all advanced feature examples."""
    try:
        if not os.getenv("OPENAI_API_KEY"):
            print("⚠️ Warning: OPENAI_API_KEY not set. Please set your API key.")

        examples_run = 0
        examples_successful = 0

        # Function calling
        try:
            function_calling_example()
            examples_successful += 1
        except Exception as e:
            print(f"Function calling example failed: {e}")
        examples_run += 1

        try:
            if embeddings_example():
                examples_successful += 1
        except Exception as e:
            print(f"Embeddings example failed: {e}")
        examples_run += 1

        try:
            if error_handling_example():
                examples_successful += 1
        except Exception as e:
            print(f"Error handling example failed: {e}")
        examples_run += 1

        try:
            if streaming_with_callbacks_example():
                examples_successful += 1
        except Exception as e:
            print(f"Streaming callbacks example failed: {e}")
        examples_run += 1

        try:
            batch_results = batch_processing_example()
            if batch_results:
                examples_successful += 1
        except Exception as e:
            print(f"Batch processing example failed: {e}")
        examples_run += 1

        print("\n" + "=" * 60)
        print(f"🎉 Advanced Features Testing Complete!")
        print(f"📊 Results: {examples_successful}/{examples_run} examples successful")

        if examples_successful > 0:
            agentops.end_trace(tracer, end_state="Success")
        else:
            agentops.end_trace(tracer, end_state="Fail")

    except Exception as e:
        print(f"\n❌ Advanced features testing failed: {e}")
        agentops.end_trace(tracer, end_state="Fail")
        raise

if __name__ == "__main__":
    main()

    print("\n" + "=" * 60)
    print("Now let's verify that our advanced LLM calls were tracked properly...")

    try:
        result = agentops.validate_trace_spans(trace_context=tracer)
        agentops.print_validation_summary(result)
    except agentops.ValidationError as e:
        print(f"❌ Error validating spans: {e}")
        raise

    print("\n✅ Success! All advanced feature LLM spans were properly recorded in AgentOps.")
