@@ -65,6 +65,13 @@ export class GeminiCliHandler extends BaseProvider implements SingleCompletionHa
 		}
 	}
 
+	/**
+	 * Creates a streaming message response using the Gemini CLI client.
+	 * @param systemInstruction - System prompt to guide the model's behavior
+	 * @param messages - Array of conversation messages
+	 * @param metadata - Optional metadata for the API call
+	 * @yields Stream of response chunks including text, reasoning, and usage data
+	 */
 	async *createMessage(
 		systemInstruction: string,
 		messages: Anthropic.Messages.MessageParam[],
@@ -76,7 +83,7 @@ export class GeminiCliHandler extends BaseProvider implements SingleCompletionHa
 			throw new Error("Gemini CLI client not initialized")
 		}
 
-		const { id: model, info, maxTokens } = this.getModel()
+		const { id: model, info } = this.getModel()
 		const contents = messages.map(convertAnthropicMessageToGemini)
 
 		try {
@@ -114,7 +121,7 @@ export class GeminiCliHandler extends BaseProvider implements SingleCompletionHa
 
 			for await (const event of stream) {
 				// The stream returns Turn objects at the end
-				turnResult = event
+				turnResult = event as any
 
 				// Handle content events
 				if (event.type === "content" && event.value) {
@@ -170,6 +177,10 @@ export class GeminiCliHandler extends BaseProvider implements SingleCompletionHa
 		}
 	}
 
+	/**
+	 * Gets the model configuration for the current provider settings.
+	 * @returns Model ID and information including pricing and capabilities
+	 */
 	override getModel() {
 		const modelId = this.options.apiModelId
 		let id = modelId && modelId in geminiCliModels ? (modelId as GeminiCliModelId) : geminiCliDefaultModelId
@@ -179,6 +190,11 @@ export class GeminiCliHandler extends BaseProvider implements SingleCompletionHa
 		return { id, info, ...params }
 	}
 
+	/**
+	 * Completes a single prompt without streaming.
+	 * @param prompt - The prompt text to complete
+	 * @returns The completed text response
+	 */
 	async completePrompt(prompt: string): Promise<string> {
 		await this.ensureInitialized()
 
@@ -232,6 +248,13 @@ export class GeminiCliHandler extends BaseProvider implements SingleCompletionHa
 		}
 	}
 
+	/**
+	 * Counts tokens for the given content blocks.
+	 * Note: The Gemini CLI library doesn't expose a direct token counting method,
+	 * so this falls back to the base implementation using tiktoken.
+	 * @param content - Array of content blocks to count tokens for
+	 * @returns The estimated token count
+	 */
 	override async countTokens(content: Array<Anthropic.Messages.ContentBlockParam>): Promise<number> {
 		// The Gemini CLI library doesn't expose a direct token counting method
 		// Fall back to the base implementation
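
For orientation, here is a minimal, hypothetical sketch of how a caller might consume the `createMessage()` stream documented above. The import path, the handler instance, and the chunk shape (a `type` discriminator with a `text` payload) are assumptions inferred from the JSDoc in this diff, not verified against the library.

```typescript
// Hypothetical usage sketch (not part of this diff).
// Assumptions: the module path, and that stream chunks carry a "type"
// discriminator with a "text" payload, as the JSDoc above suggests.
import { Anthropic } from "@anthropic-ai/sdk"
import { GeminiCliHandler } from "./gemini-cli" // assumed path

async function printCompletion(handler: GeminiCliHandler): Promise<void> {
	const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]

	// createMessage() is an async generator, so it is consumed with for await.
	for await (const chunk of handler.createMessage("You are a helpful assistant.", messages)) {
		if (chunk.type === "text") {
			process.stdout.write(chunk.text)
		}
	}
}
```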