1
- import {
2
- LlmConstrainedOutputGenerateResponse ,
3
- LlmGenerateFilesRequestOptions ,
4
- LlmGenerateTextResponse ,
5
- LlmRunner ,
6
- } from '../llm-runner.js' ;
1
+ import { LlmGenerateFilesRequestOptions , LlmRunner } from '../llm-runner.js' ;
7
2
import { join } from 'path' ;
8
3
import { mkdirSync } from 'fs' ;
9
4
import { writeFile } from 'fs/promises' ;
@@ -12,7 +7,6 @@ import {
12
7
getGeminiInstructionsFile ,
13
8
getGeminiSettingsFile ,
14
9
} from './gemini-files.js' ;
15
- import { UserFacingError } from '../../utils/errors.js' ;
16
10
import { BaseCliAgentRunner } from '../base-cli-agent-runner.js' ;
17
11
18
12
// Gemini model identifiers the CLI runner is allowed to target.
// Kept as a plain string[] so `getSupportedModels(): string[]` stays assignable.
const SUPPORTED_MODELS = [
  'gemini-2.5-pro',
  'gemini-2.5-flash',
  'gemini-2.5-flash-lite',
];
@@ -25,17 +19,6 @@ export class GeminiCliRunner extends BaseCliAgentRunner implements LlmRunner {
25
19
protected ignoredFilePatterns = [ '**/GEMINI.md' , '**/.geminiignore' ] ;
26
20
protected binaryName = 'gemini' ;
27
21
28
- generateText ( ) : Promise < LlmGenerateTextResponse > {
29
- // Technically we can make this work, but we don't need it at the time of writing.
30
- throw new UserFacingError ( 'Generating text with Gemini CLI is not supported.' ) ;
31
- }
32
-
33
- generateConstrained ( ) : Promise < LlmConstrainedOutputGenerateResponse < any > > {
34
- // We can't support this, because there's no straightforward
35
- // way to tell the Gemini CLI to follow a schema.
36
- throw new UserFacingError ( 'Constrained output with Gemini CLI is not supported.' ) ;
37
- }
38
-
39
22
/**
 * Returns the model identifiers this runner supports.
 *
 * @returns The module-level `SUPPORTED_MODELS` list (returned as-is, not copied).
 */
getSupportedModels(): string[] {
  return SUPPORTED_MODELS;
}
0 commit comments