@@ -129,22 +129,22 @@ def __adapt_input_messages(self, messages: Iterable[ChatCompletionMessageParam])
129129 return new_messages
130130
131131 def __adapt_chat_completion_request (
132- self ,
133- messages : Iterable [ChatCompletionMessageParam ],
134- model : str ,
135- frequency_penalty : Optional [float ] | NotGiven = NOT_GIVEN ,
136- logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
137- logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
138- max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
139- n : Optional [int ] | NotGiven = NOT_GIVEN ,
140- presence_penalty : Optional [float ] | NotGiven = NOT_GIVEN ,
141- response_format : completion_create_params .ResponseFormat | NotGiven = NOT_GIVEN ,
142- stop : Union [Optional [str ], List [str ]] | NotGiven = NOT_GIVEN ,
143- temperature : Optional [float ] | NotGiven = NOT_GIVEN ,
144- tools : Iterable [ChatCompletionToolParam ] | NotGiven = NOT_GIVEN ,
145- tool_choice : ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN ,
146- top_logprobs : Optional [int ] | NotGiven = NOT_GIVEN ,
147- top_p : Optional [float ] | NotGiven = NOT_GIVEN ,
132+ self ,
133+ messages : Iterable [ChatCompletionMessageParam ],
134+ model : str ,
135+ frequency_penalty : Optional [float ] | NotGiven = NOT_GIVEN ,
136+ logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
137+ logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
138+ max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
139+ n : Optional [int ] | NotGiven = NOT_GIVEN ,
140+ presence_penalty : Optional [float ] | NotGiven = NOT_GIVEN ,
141+ response_format : completion_create_params .ResponseFormat | NotGiven = NOT_GIVEN ,
142+ stop : Union [Optional [str ], List [str ]] | NotGiven = NOT_GIVEN ,
143+ temperature : Optional [float ] | NotGiven = NOT_GIVEN ,
144+ tools : Iterable [ChatCompletionToolParam ] | NotGiven = NOT_GIVEN ,
145+ tool_choice : ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN ,
146+ top_logprobs : Optional [int ] | NotGiven = NOT_GIVEN ,
147+ top_p : Optional [float ] | NotGiven = NOT_GIVEN ,
148148 ):
149149 system : Union [str , Iterable [TextBlockParam ]] | NotGiven = NOT_GIVEN
150150 adapted_messages = self .__adapt_input_messages (messages )
@@ -208,22 +208,22 @@ def is_model_supported(self, model: str) -> bool:
208208 return model in self .__definitely_allowed_models or model .startswith (self .__allowed_model_prefix )
209209
210210 def is_prompt_supported (
211- self ,
212- messages : Iterable [ChatCompletionMessageParam ],
213- model : str ,
214- frequency_penalty : Optional [float ] | NotGiven = NOT_GIVEN ,
215- logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
216- logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
217- max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
218- n : Optional [int ] | NotGiven = NOT_GIVEN ,
219- presence_penalty : Optional [float ] | NotGiven = NOT_GIVEN ,
220- response_format : completion_create_params .ResponseFormat | NotGiven = NOT_GIVEN ,
221- stop : Union [Optional [str ], List [str ]] | NotGiven = NOT_GIVEN ,
222- temperature : Optional [float ] | NotGiven = NOT_GIVEN ,
223- tools : Iterable [ChatCompletionToolParam ] | NotGiven = NOT_GIVEN ,
224- tool_choice : ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN ,
225- top_logprobs : Optional [int ] | NotGiven = NOT_GIVEN ,
226- top_p : Optional [float ] | NotGiven = NOT_GIVEN ,
211+ self ,
212+ messages : Iterable [ChatCompletionMessageParam ],
213+ model : str ,
214+ frequency_penalty : Optional [float ] | NotGiven = NOT_GIVEN ,
215+ logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
216+ logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
217+ max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
218+ n : Optional [int ] | NotGiven = NOT_GIVEN ,
219+ presence_penalty : Optional [float ] | NotGiven = NOT_GIVEN ,
220+ response_format : completion_create_params .ResponseFormat | NotGiven = NOT_GIVEN ,
221+ stop : Union [Optional [str ], List [str ]] | NotGiven = NOT_GIVEN ,
222+ temperature : Optional [float ] | NotGiven = NOT_GIVEN ,
223+ tools : Iterable [ChatCompletionToolParam ] | NotGiven = NOT_GIVEN ,
224+ tool_choice : ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN ,
225+ top_logprobs : Optional [int ] | NotGiven = NOT_GIVEN ,
226+ top_p : Optional [float ] | NotGiven = NOT_GIVEN ,
227227 ) -> int :
228228 model_limit = self .__get_model_limit (model )
229229 input_kwargs = self .__adapt_chat_completion_request (
@@ -252,27 +252,27 @@ def is_prompt_supported(
252252 return model_limit - message_token_count .input_tokens
253253
    def truncate_messages(
        self, messages: Iterable[ChatCompletionMessageParam], model: str
    ) -> Iterable[ChatCompletionMessageParam]:
        """Truncate *messages* so they fit within *model*'s context limit.

        Thin delegation wrapper: forwards to ``self._truncate_messages`` and
        returns whatever it produces (an iterable of chat-completion message
        params). No truncation logic lives in this method itself.
        """
        # NOTE(review): ``self`` is passed explicitly even though
        # ``self._truncate_messages(...)`` already binds the receiver. That is
        # only correct if ``_truncate_messages`` is a plain function stored as
        # a class attribute (an unbound strategy/callback); if it is a normal
        # instance method, this call passes ``self`` twice and will raise a
        # TypeError at runtime — confirm against the class definition.
        return self._truncate_messages(self, messages, model)
258258
259259 def chat_completion (
260- self ,
261- messages : Iterable [ChatCompletionMessageParam ],
262- model : str ,
263- frequency_penalty : Optional [float ] | NotGiven = NOT_GIVEN ,
264- logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
265- logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
266- max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
267- n : Optional [int ] | NotGiven = NOT_GIVEN ,
268- presence_penalty : Optional [float ] | NotGiven = NOT_GIVEN ,
269- response_format : completion_create_params .ResponseFormat | NotGiven = NOT_GIVEN ,
270- stop : Union [Optional [str ], List [str ]] | NotGiven = NOT_GIVEN ,
271- temperature : Optional [float ] | NotGiven = NOT_GIVEN ,
272- tools : Iterable [ChatCompletionToolParam ] | NotGiven = NOT_GIVEN ,
273- tool_choice : ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN ,
274- top_logprobs : Optional [int ] | NotGiven = NOT_GIVEN ,
275- top_p : Optional [float ] | NotGiven = NOT_GIVEN ,
260+ self ,
261+ messages : Iterable [ChatCompletionMessageParam ],
262+ model : str ,
263+ frequency_penalty : Optional [float ] | NotGiven = NOT_GIVEN ,
264+ logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
265+ logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
266+ max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
267+ n : Optional [int ] | NotGiven = NOT_GIVEN ,
268+ presence_penalty : Optional [float ] | NotGiven = NOT_GIVEN ,
269+ response_format : completion_create_params .ResponseFormat | NotGiven = NOT_GIVEN ,
270+ stop : Union [Optional [str ], List [str ]] | NotGiven = NOT_GIVEN ,
271+ temperature : Optional [float ] | NotGiven = NOT_GIVEN ,
272+ tools : Iterable [ChatCompletionToolParam ] | NotGiven = NOT_GIVEN ,
273+ tool_choice : ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN ,
274+ top_logprobs : Optional [int ] | NotGiven = NOT_GIVEN ,
275+ top_p : Optional [float ] | NotGiven = NOT_GIVEN ,
276276 ) -> ChatCompletion :
277277 input_kwargs = self .__adapt_chat_completion_request (
278278 messages = messages ,
0 commit comments