 import octobot_trading.api as trading_api
 import octobot_services.api as services_api
 import octobot_services.errors as services_errors
-import tentacles.Services.Services_bases.gpt_service as gpt_service
+import tentacles.Services.Services_bases
+
+
+def _get_gpt_service():
+    try:
+        return tentacles.Services.Services_bases.GPTService
+    except (AttributeError, ImportError):
+        raise ImportError("the gpt_service tentacle is not installed")
 
 
 class GPTEvaluator(evaluators.TAEvaluator):
@@ -58,7 +65,7 @@ def __init__(self, tentacles_setup_config):
         self.source = None
         self.period = None
         self.min_confidence_threshold = 100
-        self.gpt_model = gpt_service.GPTService.DEFAULT_MODEL
+        self.gpt_model = _get_gpt_service().DEFAULT_MODEL
         self.is_backtesting = False
         self.min_allowed_timeframe = os.getenv("MIN_GPT_TIMEFRAME", None)
         self.enable_model_selector = os_util.parse_boolean_environment_var("ENABLE_GPT_MODELS_SELECTOR", "True")
@@ -70,7 +77,7 @@ def __init__(self, tentacles_setup_config):
         except ValueError:
             self.logger.error(f"Invalid timeframe configuration: unknown timeframe: '{self.min_allowed_timeframe}'")
         self.allow_reevaluations = os_util.parse_boolean_environment_var(self.ALLOW_GPT_REEVALUATION_ENV, "True")
-        self.gpt_tokens_limit = gpt_service.GPTService.NO_TOKEN_LIMIT_VALUE
+        self.gpt_tokens_limit = _get_gpt_service().NO_TOKEN_LIMIT_VALUE
         self.services_config = None
 
     def enable_reevaluation(self) -> bool:
@@ -93,7 +100,7 @@ async def load_and_save_user_inputs(self, bot_id: str) -> dict:
         :return: the filled user input configuration
         """
         self.is_backtesting = self._is_in_backtesting()
-        if self.is_backtesting and not gpt_service.GPTService.BACKTESTING_ENABLED:
+        if self.is_backtesting and not _get_gpt_service().BACKTESTING_ENABLED:
             self.logger.error(f"{self.get_name()} is disabled in backtesting. It will only emit neutral evaluations")
         await self._init_GPT_models()
         return await super().load_and_save_user_inputs(bot_id)
@@ -122,10 +129,10 @@ def init_user_inputs(self, inputs: dict) -> None:
         if self.enable_model_selector:
             current_value = self.specific_config.get("GPT_model")
             models = list(self.GPT_MODELS) or (
-                [current_value] if current_value else [gpt_service.GPTService.DEFAULT_MODEL]
+                [current_value] if current_value else [_get_gpt_service().DEFAULT_MODEL]
             )
             self.gpt_model = self.UI.user_input(
-                "GPT model", enums.UserInputTypes.OPTIONS, gpt_service.GPTService.DEFAULT_MODEL,
+                "GPT model", enums.UserInputTypes.OPTIONS, _get_gpt_service().DEFAULT_MODEL,
                 inputs, options=sorted(models),
                 title="GPT Model: the GPT model to use. Enable the evaluator to load other models."
             )
@@ -140,18 +147,18 @@ def init_user_inputs(self, inputs: dict) -> None:
         if self.ALLOW_TOKEN_LIMIT_UPDATE:
             self.gpt_tokens_limit = self.UI.user_input(
                 "max_gpt_tokens", enums.UserInputTypes.INT,
-                self.gpt_tokens_limit, inputs, min_val=gpt_service.GPTService.NO_TOKEN_LIMIT_VALUE,
+                self.gpt_tokens_limit, inputs, min_val=_get_gpt_service().NO_TOKEN_LIMIT_VALUE,
                 title=f"OpenAI token limit: maximum daily number of tokens to consume with a given OctoBot instance. "
-                      f"Use {gpt_service.GPTService.NO_TOKEN_LIMIT_VALUE} to remove the limit."
+                      f"Use {_get_gpt_service().NO_TOKEN_LIMIT_VALUE} to remove the limit."
             )
 
     async def _init_GPT_models(self):
         if not self.GPT_MODELS:
-            self.GPT_MODELS = [gpt_service.GPTService.DEFAULT_MODEL]
+            self.GPT_MODELS = [_get_gpt_service().DEFAULT_MODEL]
             if self.enable_model_selector and not self.is_backtesting:
                 try:
                     service = await services_api.get_service(
-                        gpt_service.GPTService, self.is_backtesting, self.services_config
+                        _get_gpt_service(), self.is_backtesting, self.services_config
                     )
                     self.GPT_MODELS = service.models
                     self.ALLOW_TOKEN_LIMIT_UPDATE = service.allow_token_limit_update()
@@ -226,7 +233,7 @@ def get_formatted_data(self, computed_data) -> str:
     async def ask_gpt(self, preprompt, inputs, symbol, time_frame, candle_time) -> str:
         try:
             service = await services_api.get_service(
-                gpt_service.GPTService,
+                _get_gpt_service(),
                 self.is_backtesting,
                 {} if self.is_backtesting else self.services_config
             )
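
Note on the change above: the hard module-level import of gpt_service is replaced by importing the Services_bases package and resolving GPTService through _get_gpt_service() only at the point of use, so the evaluator module still imports when the optional gpt_service tentacle is missing and the failure surfaces as an explicit ImportError. Below is a minimal, standalone sketch of the same lazy-attribute idea; the names services_bases, get_optional_service and the placeholder DEFAULT_MODEL value are hypothetical stand-ins, not the tentacle code itself.

# Minimal sketch of lazy optional-dependency resolution (hypothetical names).
import types

# Stand-in for tentacles.Services.Services_bases: the GPTService attribute only
# exists when the optional gpt_service tentacle is installed.
services_bases = types.SimpleNamespace()


def get_optional_service(package, name):
    # Resolve the class at call time instead of import time, so importing the
    # caller's module never fails when the optional plugin is absent.
    try:
        return getattr(package, name)
    except AttributeError:
        raise ImportError(f"the {name} plugin is not installed")


if __name__ == "__main__":
    try:
        get_optional_service(services_bases, "GPTService")
    except ImportError as err:
        print(err)  # the GPTService plugin is not installed
    # After the plugin is installed, the same accessor resolves normally.
    services_bases.GPTService = type("GPTService", (), {"DEFAULT_MODEL": "placeholder-model"})
    print(get_optional_service(services_bases, "GPTService").DEFAULT_MODEL)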