@@ -159,8 +159,9 @@ def is_finished(self, response: LLMResult) -> bool:
             if finish_reason is not None:
                 # OpenAI uses "stop"
                 # Vertex AI uses "STOP" or "MAX_TOKENS"
+                # WatsonX AI uses "eos_token"
                 is_finished_list.append(
-                    finish_reason in ["stop", "STOP", "MAX_TOKENS"]
+                    finish_reason in ["stop", "STOP", "MAX_TOKENS", "eos_token"]
                 )

             # provied more conditions here
@@ -177,12 +178,12 @@ def is_finished(self, response: LLMResult) -> bool:
             if resp_message.response_metadata.get("finish_reason") is not None:
                 finish_reason = resp_message.response_metadata.get("finish_reason")
                 is_finished_list.append(
-                    finish_reason in ["stop", "STOP", "MAX_TOKENS"]
+                    finish_reason in ["stop", "STOP", "MAX_TOKENS", "eos_token"]
                 )
             elif resp_message.response_metadata.get("stop_reason") is not None:
                 stop_reason = resp_message.response_metadata.get("stop_reason")
                 is_finished_list.append(
-                    stop_reason in ["end_turn", "stop", "STOP", "MAX_TOKENS"]
+                    stop_reason in ["end_turn", "stop", "STOP", "MAX_TOKENS", "eos_token"]
                 )
             # default to True
             else: