File tree Expand file tree Collapse file tree 2 files changed +3
-3
lines changed Expand file tree Collapse file tree 2 files changed +3
-3
lines changed Original file line number Diff line number Diff line change 15
15
# Configure basic logging
16
16
logging.basicConfig(level=logging.INFO)
17
17
18
- default_openai_base_url = "https://api.openai.com/v1/"
18
+ default_openai_base_url = "https://api.openai.com/v1/chat/completions"
19
19
20
20
# Set the environment variables for the chat model
21
- LLM_URL = os.getenv("LLM_URL", default_openai_base_url) + "chat/completions"
21
+ LLM_URL = os.getenv("LLM_URL", default_openai_base_url)
22
22
# Fallback to OpenAI Model if not set in environment
23
23
MODEL_ID = os.getenv("LLM_MODEL", "gpt-4-turbo")
24
24
Original file line number Diff line number Diff line change @@ -7,7 +7,7 @@ services:
7
7
- " 8000:8000"
8
8
restart: always
9
9
environment:
10
- - ENDPOINT_URL=http://llm/api/v1/chat/completions # endpoint to the gateway service
10
+ - LLM_URL=http://llm/api/v1/chat/completions # endpoint to the gateway service
11
11
- MODEL=default # LLM model ID used for the gateway.
12
12
# For other models, see https://docs.defang.io/docs/concepts/managed-llms/openai-access-gateway#model-mapping
13
13
- OPENAI_API_KEY=FAKE_TOKEN # the actual value will be ignored when using the gateway, but it should match the one in the llm service
You can’t perform that action at this time.
0 commit comments