@@ -4,18 +4,28 @@ import {
44 GenerativeModel ,
55 GoogleGenerativeAI ,
66} from "@google/generative-ai" ;
7+ import * as vscode from "vscode" ;
8+ import { Orchestrator } from "../../agents/orchestrator" ;
9+ import { ProcessInputResult } from "../../application/interfaces/agent.interface" ;
10+ import { createPrompt } from "../../utils/prompt" ;
711import { BaseLLM } from "../base" ;
812import { GeminiModelResponseType , ILlmConfig } from "../interface" ;
913
10- export class GeminiLLM extends BaseLLM < GeminiModelResponseType > {
14+ export class GeminiLLM
15+ extends BaseLLM < GeminiModelResponseType >
16+ implements vscode . Disposable
17+ {
1118 private readonly generativeAi : GoogleGenerativeAI ;
1219 private response : EmbedContentResponse | GenerateContentResult | undefined ;
20+ protected readonly orchestrator : Orchestrator ;
21+ private readonly disposables : vscode . Disposable [ ] = [ ] ;
1322
1423 constructor ( config : ILlmConfig ) {
1524 super ( config ) ;
1625 this . config = config ;
1726 this . generativeAi = new GoogleGenerativeAI ( this . config . apiKey ) ;
1827 this . response = undefined ;
28+ this . orchestrator = Orchestrator . getInstance ( ) ;
1929 }
2030
2131 public async generateEmbeddings ( text : string ) : Promise < number [ ] > {
@@ -42,7 +52,7 @@ export class GeminiLLM extends BaseLLM<GeminiModelResponseType> {
4252 }
4353 }
4454
45- private getModel ( ) : GenerativeModel {
55+ getModel ( ) : GenerativeModel {
4656 try {
4757 const model : GenerativeModel | undefined =
4858 this . generativeAi . getGenerativeModel ( {
@@ -59,11 +69,47 @@ export class GeminiLLM extends BaseLLM<GeminiModelResponseType> {
5969 }
6070 }
6171
72+ async generateContent (
73+ userInput : string ,
74+ ) : Promise < Partial < ProcessInputResult > > {
75+ try {
76+ const prompt = createPrompt ( userInput ) ;
77+ const model = this . getModel ( ) ;
78+ const generateContentResponse : GenerateContentResult =
79+ await model . generateContent ( prompt ) ;
80+ const { text, usageMetadata } = generateContentResponse . response ;
81+ const parsedResponse = this . orchestrator . parseResponse ( text ( ) ) ;
82+ const extractedQueries = parsedResponse . queries ;
83+ const extractedThought = parsedResponse . thought ;
84+ const tokenCount = usageMetadata ?. totalTokenCount ?? 0 ;
85+ const result = {
86+ queries : extractedQueries ,
87+ tokens : tokenCount ,
88+ prompt : userInput ,
89+ thought : extractedThought ,
90+ } ;
91+ this . orchestrator . publish ( "onQuery" , JSON . stringify ( result ) ) ;
92+ return result ;
93+ } catch ( error : any ) {
94+ this . orchestrator . publish ( "onError" , error ) ;
95+ vscode . window . showErrorMessage ( "Error processing user query" ) ;
96+ this . logger . error (
97+ "Error generating, queries, thoughts from user query" ,
98+ error ,
99+ ) ;
100+ throw error ;
101+ }
102+ }
103+
62104 public createSnapShot ( data ?: any ) : GeminiModelResponseType {
63105 return { ...this . response , ...data } ;
64106 }
65107
  /**
   * Restores instance state from a snapshot produced by `createSnapShot`.
   *
   * NOTE(review): this copies every own enumerable key of the snapshot onto
   * the instance, so any matching field is overwritten wholesale — confirm
   * callers only pass snapshots produced by this class.
   */
  public loadSnapShot(snapshot: ReturnType<typeof this.createSnapShot>): void {
    Object.assign(this, snapshot);
  }
111+
112+ public dispose ( ) : void {
113+ this . disposables . forEach ( ( d ) => d . dispose ( ) ) ;
114+ }
69115}
0 commit comments