1515# License along with this library.
1616import asyncio
1717import os
18+ import uuid
19+
1820import openai
1921import logging
2022import datetime
2628
2729import octobot_commons .constants as commons_constants
2830import octobot_commons .enums as commons_enums
31+ import octobot_commons .logging as commons_logging
2932import octobot_commons .time_frame_manager as time_frame_manager
3033import octobot_commons .authentication as authentication
3134import octobot_commons .tree as tree
3841octobot_services .util .patch_openai_proxies ()
3942
4043
# Models that reject the "system" role in chat messages: for these,
# system prompts are downgraded to plain user messages (see
# GPTService.create_message).
NO_SYSTEM_PROMPT_MODELS = [
    "o1-mini",
]
# Models that do not support the full set of chat-completion parameters
# (max_tokens, temperature): those parameters are omitted for them
# when sending requests.
MINIMAL_PARAMS_MODELS = [
    "o1-mini",
]
# Chat message role identifiers used when building prompt messages.
SYSTEM = "system"
USER = "user"
53+
4154class GPTService (services .AbstractService ):
4255 BACKTESTING_ENABLED = True
4356 DEFAULT_MODEL = "gpt-3.5-turbo"
@@ -47,7 +60,10 @@ def get_fields_description(self):
4760 if self ._env_secret_key is None :
4861 return {
4962 services_constants .CONIG_OPENAI_SECRET_KEY : "Your openai API secret key" ,
50- services_constants .CONIG_LLM_CUSTOM_BASE_URL : "Custom LLM base url to use. Leave empty to use openai.com" ,
63+ services_constants .CONIG_LLM_CUSTOM_BASE_URL : (
64+ "Custom LLM base url to use. Leave empty to use openai.com. For Ollama models, "
65+ "add /v1 to the url (such as: http://localhost:11434/v1)"
66+ ),
5167 }
5268 return {}
5369
@@ -75,7 +91,12 @@ def __init__(self):
7591 self .last_consumed_token_date = None
7692
7793 @staticmethod
78- def create_message (role , content ):
94+ def create_message (role , content , model : str = None ):
95+ if role == SYSTEM and model in NO_SYSTEM_PROMPT_MODELS :
96+ commons_logging .get_logger (GPTService .__name__ ).debug (
97+ f"Overriding prompt to use { USER } instead of { SYSTEM } for { model } "
98+ )
99+ return {"role" : USER , "content" : content }
79100 return {"role" : role , "content" : content }
80101
81102 async def get_chat_completion (
@@ -124,12 +145,18 @@ async def _get_signal_from_gpt(
124145 self ._ensure_rate_limit ()
125146 try :
126147 model = model or self .model
148+ supports_params = model not in MINIMAL_PARAMS_MODELS
149+ if not supports_params :
150+ self .logger .info (
151+ f"The { model } model does not support every required parameter, results might not be as accurate "
152+ f"as with other models."
153+ )
127154 completions = await self ._get_client ().chat .completions .create (
128155 model = model ,
129- max_tokens = max_tokens ,
156+ max_tokens = max_tokens if supports_params else openai . NOT_GIVEN ,
130157 n = n ,
131158 stop = stop ,
132- temperature = temperature ,
159+ temperature = temperature if supports_params else openai . NOT_GIVEN ,
133160 messages = messages
134161 )
135162 self ._update_token_usage (completions .usage .total_tokens )
@@ -138,9 +165,15 @@ async def _get_signal_from_gpt(
138165 openai .BadRequestError , # error in request
139166 openai .UnprocessableEntityError # error in model (ex: model not found)
140167 )as err :
141- raise errors .InvalidRequestError (
142- f"Error when running request with model { model } (invalid request): { err } "
143- ) from err
168+ if "does not support 'system' with this model" in str (err ):
169+ desc = err .body .get ("message" , str (err ))
170+ err_message = (
171+ f"The \" { model } \" model can't be used with { SYSTEM } prompts. "
172+ f"It should be added to NO_SYSTEM_PROMPT_MODELS: { desc } "
173+ )
174+ else :
175+ err_message = f"Error when running request with model { model } (invalid request): { err } "
176+ raise errors .InvalidRequestError (err_message ) from err
144177 except openai .AuthenticationError as err :
145178 self .logger .error (f"Invalid OpenAI api key: { err } " )
146179 self .creation_error_message = err
@@ -284,7 +317,7 @@ def _update_token_usage(self, consumed_tokens):
284317 self .logger .debug (f"Consumed { consumed_tokens } tokens. { self .consumed_daily_tokens } consumed tokens today." )
285318
286319 def check_required_config (self , config ):
287- if self ._env_secret_key is not None or self .use_stored_signals_only ():
320+ if self ._env_secret_key is not None or self .use_stored_signals_only () or self . _get_base_url () :
288321 return True
289322 try :
290323 config_key = config [services_constants .CONIG_OPENAI_SECRET_KEY ]
@@ -319,10 +352,18 @@ def get_logo(self):
319352 return "https://upload.wikimedia.org/wikipedia/commons/0/04/ChatGPT_logo.svg"
320353
321354 def _get_api_key (self ):
322- return self ._env_secret_key or \
323- self .config [services_constants .CONFIG_CATEGORY_SERVICES ][services_constants .CONFIG_GPT ][
324- services_constants .CONIG_OPENAI_SECRET_KEY
325- ]
355+ key = (
356+ self ._env_secret_key or
357+ self .config [services_constants .CONFIG_CATEGORY_SERVICES ][services_constants .CONFIG_GPT ].get (
358+ services_constants .CONIG_OPENAI_SECRET_KEY , None
359+ )
360+ )
361+ if key and not fields_utils .has_invalid_default_config_value (key ):
362+ return key
363+ if self ._get_base_url ():
364+ # no key and custom base url: use random key
365+ return uuid .uuid4 ().hex
366+ return key
326367
327368 def _get_base_url (self ):
328369 value = self .config [services_constants .CONFIG_CATEGORY_SERVICES ][services_constants .CONFIG_GPT ].get (
@@ -337,20 +378,28 @@ async def prepare(self) -> None:
337378 if self .use_stored_signals_only ():
338379 self .logger .info (f"Skipping GPT - OpenAI models fetch as self.use_stored_signals_only() is True" )
339380 return
381+ if self ._get_base_url ():
382+ self .logger .info (f"Using custom LLM url: { self ._get_base_url ()} " )
340383 fetched_models = await self ._get_client ().models .list ()
341384 self .models = [d .id for d in fetched_models .data ]
342385 if self .model not in self .models :
343- self .logger .warning (
344- f"Warning: the default '{ self .model } ' model is not in available LLM models from the "
345- f"selected LLM provider. "
346- f"Available models are: { self .models } . Please select an available model when configuring your "
347- f"evaluators."
348- )
386+ if self ._get_base_url ():
387+ self .logger .info (
388+ f"Custom LLM available models are: { self .models } . "
389+ f"Please select one of those in your evaluator configuration."
390+ )
391+ else :
392+ self .logger .warning (
393+ f"Warning: the default '{ self .model } ' model is not in available LLM models from the "
394+ f"selected LLM provider. "
395+ f"Available models are: { self .models } . Please select an available model when configuring your "
396+ f"evaluators."
397+ )
349398 except openai .AuthenticationError as err :
350399 self .logger .error (f"Invalid OpenAI api key: { err } " )
351400 self .creation_error_message = err
352401 except Exception as err :
353- self .logger .error ( f"Unexpected error when checking api key : { err } " )
402+ self .logger .exception ( err , True , f"Unexpected error when initializing GPT service : { err } " )
354403
    def _is_healthy(self):
        # Healthy when running on stored signals only (no live API access
        # needed), or when both an API key and a fetched model list are
        # available.
        return self.use_stored_signals_only() or (self._get_api_key() and self.models)
0 commit comments