@@ -108,7 +108,7 @@ def is_prompt_supported(
108108 model = model ,
109109 contents = chat ,
110110 config = CountTokensConfig (
111- system_instructions = system ,
111+ system_instruction = system ,
112112 ),
113113 )
114114 token_count = token_response .total_tokens
@@ -181,15 +181,15 @@ def chat_completion(
181181 config = GenerateContentConfig (
182182 system_instruction = system_content ,
183183 safety_settings = self .__SAFETY_SETTINGS ,
184- ** generation_dict ,
184+ ** NotGiven . remove_not_given ( generation_dict ) ,
185185 ),
186186 )
187187 return self .__google_response_to_openai_response (response , model )
188188
189189 @staticmethod
190190 def __google_response_to_openai_response (google_response : GenerateContentResponse , model : str ) -> ChatCompletion :
191191 choices = []
192- for candidate in google_response .candidates :
192+ for index , candidate in enumerate ( google_response .candidates ) :
193193 # note that instead of system, from openai, its model, from google.
194194 parts = [part .text or part .inline_data for part in candidate .content .parts ]
195195
@@ -202,7 +202,7 @@ def __google_response_to_openai_response(google_response: GenerateContentRespons
202202
203203 choice = Choice (
204204 finish_reason = finish_reason_map .get (candidate .finish_reason , "stop" ),
205- index = candidate . index ,
205+ index = index ,
206206 message = ChatCompletionMessage (
207207 content = "\n " .join (parts ),
208208 role = "assistant" ,