-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathfallback_example.py
More file actions
321 lines (260 loc) · 11.4 KB
/
fallback_example.py
File metadata and controls
321 lines (260 loc) · 11.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Unified interface for LLM providers using OpenAI format
# https://github.com/muxi-ai/onellm
#
# Copyright (C) 2025 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# ============================================================================ #
# OneLLM EXAMPLE: Fallback Mechanisms for Reliability
# ============================================================================ #
#
# This example demonstrates how to use OneLLM's fallback mechanisms to create
# robust applications that gracefully handle model failures.
# Key features demonstrated:
#
# - Configuring fallback models across different providers
# - Custom fallback configurations and policies
# - Fallback callbacks for monitoring and logging
# - Streaming responses with fallback support
# - Error handling with specific error types
#
# CODEBASE RELATIONSHIP:
# ----------------------
# This example leverages OneLLM's support for:
# - Fallback mechanism in ChatCompletion, Completion, and Embedding APIs
# - Error handling and retry logic
# - Custom callback integrations
# - Cross-provider compatibility
#
# RELATED EXAMPLES:
# ----------------
# - retry_example.py: Retrying with the same model before fallback
# - chat_completion_example.py: Basic chat completions without fallbacks
# - parallel_operation_example.py: Parallel processing with reliability
#
# REQUIREMENTS:
# ------------
# - OneLLM
# - OpenAI API key
# - Anthropic API key (optional, for more fallback options)
# - Cohere API key (optional, for more fallback options)
#
# EXPECTED OUTPUT:
# ---------------
# Multiple demonstrations of fallback scenarios:
# 1. Successful fallback from non-existent to valid model
# 2. Limited fallback attempts based on configuration
# 3. Fallbacks with different API types (completion, embedding)
# 4. Custom callback execution during fallback events
# 5. Streaming response with fallback support
# ============================================================================ #
"""
import asyncio
import os
import logging
from onellm import ChatCompletion, Completion, Embedding
from onellm.errors import RateLimitError
# Set up logging to see fallback messages
logging.basicConfig(level=logging.INFO)
def set_api_keys_from_env():
    """
    Set API keys from environment variables.

    Reads each supported provider's key from its environment variable and,
    when present, registers it with onellm via ``set_api_key``. Providers
    whose variable is unset or empty are silently skipped, so the example
    still runs with only an OpenAI key configured.
    """
    from onellm import set_api_key

    # (environment variable, onellm provider id) pairs.
    # Add a tuple here to support another provider — no new code needed.
    providers = [
        ("OPENAI_API_KEY", "openai"),
        ("ANTHROPIC_API_KEY", "anthropic"),
        ("COHERE_API_KEY", "cohere"),
    ]
    for env_var, provider in providers:
        key = os.environ.get(env_var)
        if key:
            set_api_key(key, provider)
async def demonstrate_chat_completion_fallback():
    """
    Show the fallback mechanism for chat completions.

    Two scenarios are demonstrated:
    1. A non-existent primary model that falls back to a working model.
    2. A fallback chain deliberately cut short by ``max_fallbacks=1``,
       which is expected to fail.
    """
    print("\n=== Chat Completion Fallback Demo ===")
    user_messages = [{"role": "user", "content": "What are three interesting facts about the moon?"}]

    # --- Scenario 1: bogus primary model, first fallback should answer ---
    try:
        print("\nScenario 1: Fallback from non-existent model to valid model")
        result = await ChatCompletion.acreate(
            model="openai/non-existent-model",  # guaranteed to fail
            messages=user_messages,
            fallback_models=[
                "openai/gpt-3.5-turbo",  # expected to succeed
                "anthropic/claude-3-haiku",  # second-line backup
            ],
            fallback_config={"log_fallbacks": True},  # surface fallback events in logs
        )
        print("✅ Success! Response from fallback model:")
        print(f"Content: {result.choices[0].message['content'][:150]}...")
    except Exception as exc:
        print(f"❌ All fallbacks failed: {exc}")

    # --- Scenario 2: only one fallback attempt allowed, so this fails ---
    try:
        print("\nScenario 2: Custom fallback configuration with max_fallbacks=1")
        result = await ChatCompletion.acreate(
            model="openai/non-existent-model-1",  # guaranteed to fail
            messages=user_messages,
            fallback_models=[
                "openai/non-existent-model-2",  # also fails
                # Valid, but never reached because max_fallbacks=1:
                "openai/gpt-3.5-turbo",
            ],
            fallback_config={
                "max_fallbacks": 1,  # stop after the first fallback attempt
                "log_fallbacks": True,  # surface fallback events in logs
            },
        )
        print("✅ Success! Response from fallback model:")
        print(f"Content: {result.choices[0].message['content'][:150]}...")
    except Exception as exc:
        print(f"❌ Expected failure due to max_fallbacks=1: {exc}")
async def demonstrate_completion_fallback():
    """
    Show the fallback mechanism for text completions.

    Also demonstrates restricting fallback triggers to specific error
    classes via the ``retriable_errors`` entry in ``fallback_config``.
    """
    print("\n=== Text Completion Fallback Demo ===")
    haiku_prompt = "Write a haiku about programming:"

    try:
        result = await Completion.acreate(
            model="openai/non-existent-model",  # guaranteed to fail
            prompt=haiku_prompt,
            fallback_models=["openai/gpt-3.5-turbo-instruct"],  # expected to succeed
            fallback_config={
                "log_fallbacks": True,
                # Restrict fallback triggers to rate-limit errors only
                "retriable_errors": [RateLimitError],
            },
        )
        print("✅ Success! Response from fallback model:")
        print(f"Text: {result.choices[0].text}")
    except Exception as exc:
        print(f"❌ All fallbacks failed: {exc}")
async def demonstrate_embedding_fallback():
    """
    Show the fallback mechanism for embeddings.

    Useful for keeping vector pipelines alive even when the primary
    embedding model is unavailable.
    """
    print("\n=== Embedding Fallback Demo ===")
    sample_texts = ["The quick brown fox jumps over the lazy dog"]

    try:
        result = await Embedding.acreate(
            model="openai/non-existent-embedding-model",  # guaranteed to fail
            input=sample_texts,
            fallback_models=["openai/text-embedding-ada-002"],  # expected to succeed
            fallback_config={"log_fallbacks": True},  # surface fallback events in logs
        )
        print("✅ Success! Got embeddings from fallback model:")
        print(f"Embedding dimensions: {len(result.data[0].embedding)}")
    except Exception as exc:
        print(f"❌ All fallbacks failed: {exc}")
async def custom_fallback_callback(primary_model: str, fallback_model: str, error: Exception):
    """
    Sample hook invoked every time a fallback fires.

    Reports which model failed, which fallback took over, and the error
    that triggered the switch. A real application might emit metrics or
    alerts from here instead of printing.

    Args:
        primary_model: The original model that failed.
        fallback_model: The fallback model being used instead.
        error: The exception that caused the primary model to fail.
    """
    report = [
        "\n🔄 Fallback callback triggered:",
        f" - Primary model: {primary_model}",
        f" - Fallback model used: {fallback_model}",
        f" - Error from primary model: {type(error).__name__}: {str(error)}",
    ]
    for line in report:
        print(line)
async def demonstrate_fallback_callback():
    """
    Show how to register a callback that runs whenever a fallback occurs.

    The callback receives the failed model, the fallback model, and the
    triggering error — handy for custom logging or metrics.
    """
    print("\n=== Fallback Callback Demo ===")
    user_messages = [{"role": "user", "content": "What's your favorite programming language?"}]

    try:
        result = await ChatCompletion.acreate(
            model="openai/non-existent-model",  # guaranteed to fail
            messages=user_messages,
            fallback_models=["openai/gpt-3.5-turbo"],  # expected to succeed
            fallback_config={
                "log_fallbacks": True,
                # Invoked on every fallback event
                "fallback_callback": custom_fallback_callback,
            },
        )
        print("✅ Success! Response from fallback model:")
        print(f"Content: {result.choices[0].message['content'][:150]}...")
    except Exception as exc:
        print(f"❌ All fallbacks failed: {exc}")
async def demonstrate_streaming_fallback():
    """
    Show that fallbacks also work for streaming responses.

    Chunks from the fallback model are printed incrementally as they
    arrive, just as they would be from the primary model.
    """
    print("\n=== Streaming Fallback Demo ===")
    user_messages = [{"role": "user", "content": "Count from 1 to 5 slowly."}]

    try:
        print("\nStarting streaming with fallback:")
        stream = await ChatCompletion.acreate(
            model="openai/non-existent-model",  # guaranteed to fail
            messages=user_messages,
            stream=True,  # request incremental chunks
            fallback_models=["openai/gpt-3.5-turbo"],  # expected to succeed
            fallback_config={"log_fallbacks": True},  # surface fallback events in logs
        )

        collected = []  # accumulates the full response text
        async for piece in stream:
            if not piece.choices:
                continue
            text = piece.choices[0].delta.get("content")
            if text:
                collected.append(text)
                print(text, end="", flush=True)  # echo each chunk as it arrives
        print("\n✅ Streaming complete!")
    except Exception as exc:
        print(f"\n❌ All fallbacks failed: {exc}")
async def run_demos():
    """
    Entry point: configure API keys, then run every fallback demo in order.
    """
    set_api_keys_from_env()

    # Demos run sequentially; order matches the module docstring's outline.
    demos = (
        demonstrate_chat_completion_fallback,
        demonstrate_completion_fallback,
        demonstrate_embedding_fallback,
        demonstrate_fallback_callback,
        demonstrate_streaming_fallback,
    )
    for demo in demos:
        await demo()
# Run all fallback demos when executed directly as a script.
if __name__ == "__main__":
    asyncio.run(run_demos())