@@ -57,6 +57,7 @@ async def openai_client(request):
5757 if client_type == "direct" :
5858 openai_client = AsyncOpenAI (
5959 api_key = os .environ ["OPENAI_API_KEY" ],
60+ base_url=os.environ.get("OPENAI_BASE_URL"),
6061 )
6162 yield openai_client
6263 else :
@@ -80,7 +81,7 @@ async def test_llm_generate_response():
8081 id = "test_provider" ,
8182 type = "openai" ,
8283 name = "myopenai" ,
83- base_url = " https://api.openai.com/v1" ,
84+ base_url=os.environ.get("OPENAI_BASE_URL", "https://api.openai.com/v1"),
8485 api_key = os .environ ["OPENAI_API_KEY" ],
8586 properties = {},
8687 created_at = None ,
@@ -106,8 +107,9 @@ async def test_llm_generate_response():
106107 request = Mock (spec = Request )
107108
108109 # Test non-streaming
110+ model = os.environ.get("OPENAI_MODEL", "gpt-5")
109111 body_json = {
110- "model" : "myopenai/gpt-4o " ,
112+ "model": f"myopenai/{model}",
111113 "input" : [{"role" : "user" , "content" : "Just echo the word 'Hello'" }],
112114 }
113115
@@ -149,7 +151,7 @@ async def test_llm_generate_response_streaming():
149151 id = "test_provider" ,
150152 type = "openai" ,
151153 name = "myopenai" ,
152- base_url = " https://api.openai.com/v1" ,
154+ base_url=os.environ.get("OPENAI_BASE_URL", "https://api.openai.com/v1"),
153155 api_key = os .environ ["OPENAI_API_KEY" ],
154156 properties = {},
155157 created_at = None ,
@@ -175,8 +177,9 @@ async def test_llm_generate_response_streaming():
175177 request = Mock (spec = Request )
176178
177179 # Test streaming
180+ model = os.environ.get("OPENAI_MODEL", "gpt-5")
178181 body_json = {
179- "model" : "myopenai/gpt-4o " ,
182+ "model": f"myopenai/{model}",
180183 "input" : [{"role" : "user" , "content" : "Just echo the word 'Hello'" }],
181184 "stream" : True ,
182185 }
@@ -209,7 +212,7 @@ async def test_chat_responses_api(openai_client: AsyncOpenAI, request):
209212 """Test chat responses API."""
210213
211214 request .node .callspec .params ["openai_client" ]
212- model = " gpt-4o" # No backend_proxy
215+ model = os.environ.get("OPENAI_MODEL", "gpt-5")
213216
214217 # Make the request
215218 response = await openai_client .responses .create (
@@ -241,7 +244,7 @@ async def test_chat_responses_api_streaming(openai_client: AsyncOpenAI, request)
241244 """Test streaming chat responses API."""
242245
243246 request .node .callspec .params ["openai_client" ]
244- model = " gpt-4o" # No backend_proxy
247+ model = os.environ.get("OPENAI_MODEL", "gpt-5")
245248
246249 # Make the streaming request
247250 stream = await openai_client .responses .create (
@@ -443,7 +446,7 @@ async def test_openai_llm_provider_not_found():
443446
444447 # Test with non-existent provider
445448 body_json = {
446- "model" : "nonexistent/gpt-4 " ,
449+ "model": "nonexistent/gpt-5",
447450 "input" : [{"role" : "user" , "content" : "Hello" }],
448451 }
449452
0 commit comments