from pprint import pprint
import os
import asyncio
import base64

from dotenv import load_dotenv

# Assumed import path for LLMCore (the import is not shown in this diff).
from llmstudio_core.providers import LLMCore

load_dotenv()
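# The runs below read credentials from the environment (populated by
# load_dotenv), e.g. OPENAI_API_KEY, AZURE_API_KEY / AZURE_API_VERSION /
# AZURE_API_ENDPOINT, GOOGLE_API_KEY, and BEDROCK_REGION / BEDROCK_SECRET_KEY /
# BEDROCK_ACCESS_KEY.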
def run_provider(provider, model, api_key=None, **kwargs):
    print(f"\n\n###RUNNING for <{provider}>, <{model}>###")
    llm = LLMCore(provider=provider, api_key=api_key, **kwargs)
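    # Provider-specific settings (e.g. api_version/api_endpoint for Azure,
    # region/secret_key/access_key for Bedrock) travel through **kwargs,
    # as the calls in run_chat_all_providers() below show.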
    # ... (unchanged lines elided from this diff) ...
    print("\nAsync Stream")
    async def async_stream():
        chat_request = build_chat_request(model, chat_input="Hello, my name is Tom", is_stream=True)

        response_async = await llm.achat(**chat_request)
        async for p in response_async:
            ...  # (loop body elided from this diff)
    # ... (unchanged lines elided from this diff) ...
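    # A minimal way to run the coroutine defined above (an assumption; the
    # elided code presumably does the equivalent):
    #   asyncio.run(async_stream())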
    print("\nSync Non-Stream")
    chat_request = build_chat_request(model, chat_input="Hello, my name is Alice", is_stream=False)

    response_sync = llm.chat(**chat_request)
    pprint(response_sync)
    latencies["sync (ms)"] = response_sync.metrics["latency_s"] * 1000
    print("\nSync Stream")
    chat_request = build_chat_request(model, chat_input="Hello, my name is Mary", is_stream=True)

    response_sync_stream = llm.chat(**chat_request)
    for p in response_sync_stream:
        ...  # (loop body elided from this diff)
# ... (unchanged lines elided; the fragment below is from inside
# build_chat_request(model: str, chat_input: str, is_stream: bool, max_tokens: ...)) ...
            "parameters": {
                "temperature": 0,
                "max_tokens": max_tokens,
                "functions": None,
            }
        }
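# From its call sites and the fragment above, build_chat_request plausibly
# returns a dict shaped like (an assumption; most of the function is elided):
#   {"chat_input": ..., "model": model, "is_stream": is_stream,
#    "parameters": {"temperature": 0, "max_tokens": max_tokens, "functions": None}}
# which is then splatted into llm.chat(**chat_request) / llm.achat(**chat_request).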
# ... (unchanged lines elided; the fragment below is from inside
# multiple_provider_runs(provider: str, model: str, num_runs: int, api_key: str, **kwargs)) ...
        latencies = run_provider(provider=provider, model=model, api_key=api_key, **kwargs)
        pprint(latencies)
def run_chat_all_providers():
    # OpenAI
    multiple_provider_runs(provider="openai", model="gpt-4o-mini", api_key=os.environ["OPENAI_API_KEY"], num_runs=1)
    multiple_provider_runs(provider="openai", model="o3-mini", api_key=os.environ["OPENAI_API_KEY"], num_runs=1)
    # multiple_provider_runs(provider="openai", model="o1-preview", api_key=os.environ["OPENAI_API_KEY"], num_runs=1)
    # Azure
    multiple_provider_runs(provider="azure", model="gpt-4o-mini", num_runs=1, api_key=os.environ["AZURE_API_KEY"], api_version=os.environ["AZURE_API_VERSION"], api_endpoint=os.environ["AZURE_API_ENDPOINT"])
    # multiple_provider_runs(provider="azure", model="gpt-4o", num_runs=1, api_key=os.environ["AZURE_API_KEY"], api_version=os.environ["AZURE_API_VERSION"], api_endpoint=os.environ["AZURE_API_ENDPOINT"])
    # multiple_provider_runs(provider="azure", model="o1-mini", num_runs=1, api_key=os.environ["AZURE_API_KEY"], api_version=os.environ["AZURE_API_VERSION"], api_endpoint=os.environ["AZURE_API_ENDPOINT"])
    # multiple_provider_runs(provider="azure", model="o1-preview", num_runs=1, api_key=os.environ["AZURE_API_KEY"], api_version=os.environ["AZURE_API_VERSION"], api_endpoint=os.environ["AZURE_API_ENDPOINT"])
    # Anthropic
    # multiple_provider_runs(provider="anthropic", model="claude-3-opus-20240229", num_runs=1, api_key=os.environ["ANTHROPIC_API_KEY"])
    # VertexAI
    multiple_provider_runs(provider="vertexai", model="gemini-1.5-flash", num_runs=1, api_key=os.environ["GOOGLE_API_KEY"])

    # Bedrock
    multiple_provider_runs(provider="bedrock", model="us.amazon.nova-lite-v1:0", num_runs=1, api_key=None, region=os.environ["BEDROCK_REGION"], secret_key=os.environ["BEDROCK_SECRET_KEY"], access_key=os.environ["BEDROCK_ACCESS_KEY"])
    # multiple_provider_runs(provider="bedrock", model="anthropic.claude-3-5-sonnet-20241022-v2:0", num_runs=1, api_key=None, region=os.environ["BEDROCK_REGION"], secret_key=os.environ["BEDROCK_SECRET_KEY"], access_key=os.environ["BEDROCK_ACCESS_KEY"])


run_chat_all_providers()
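# Each run pprints a latencies dict; from the fragments above it holds at
# least {"sync (ms)": ...}, plus whatever the elided async/stream paths record.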
def messages(img_path):
    """
    Creates a message payload with both text and image content.
    Adapts the format based on the provider.
    """
    with open(img_path, "rb") as f:
        image_bytes = f.read()

    base64_image = base64.b64encode(image_bytes).decode("utf-8")
    return [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
                },
                {
                    "type": "image_url",
                    "image_url": {"url": "https://awsmp-logos.s3.amazonaws.com/seller-zx4pk43qpmxoa/53d235806f343cec94aac3c577d81c13.png"},
                },
            ],
        }
    ]
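# Note: the payload mixes a base64-encoded local image (as a data: URL) with a
# plain remote URL, both as OpenAI-style "image_url" content parts; the core
# presumably converts these for non-OpenAI providers such as Bedrock.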
def run_send_imgs():
    provider = "bedrock"
    model = "us.amazon.nova-lite-v1:0"
    chat_input = messages(img_path="./libs/llmstudio/tests/integration_tests/test_data/llmstudio-logo.jpeg")
    chat_request = build_chat_request(model=model, chat_input=chat_input, is_stream=False)
    # Bedrock authenticates via region/secret/access keys, so no api_key is needed.
    llm = LLMCore(provider=provider, api_key=None, region=os.environ["BEDROCK_REGION"], secret_key=os.environ["BEDROCK_SECRET_KEY"], access_key=os.environ["BEDROCK_ACCESS_KEY"])
    response_sync = llm.chat(**chat_request)
    # print(response_sync)
    response_sync.clean_print()
    # for p in response_sync:
    #     if p.metrics:
    #         p.clean_print()


run_send_imgs()
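# To exercise the streaming path instead, build the request with
# is_stream=True and iterate over the response as in the commented-out
# loop above.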