1+ import {
2+ LLMRequest ,
3+ LLMResponse ,
4+ ModelsResponse ,
5+ LLMOptions ,
6+ LLMServiceError ,
7+ LLMConnectionError ,
8+ LLMAuthenticationError ,
9+ LLMTimeoutError ,
10+ LLMParsingError
11+ } from '../types/llm' ;
12+
/**
 * Runtime configuration for LLMClient. Values are resolved once, in the
 * constructor, from environment variables (with hardcoded fallbacks).
 */
interface LLMClientConfig {
  baseUrl: string;      // service root URL, e.g. 'http://10.10.248.41'
  username: string;     // HTTP Basic auth user
  password: string;     // HTTP Basic auth password
  defaultModel: string; // model name sent with every generate request
  timeout: number;      // per-request timeout in milliseconds
  maxRetries: number;   // total attempts made by makeRequest before giving up
}
21+
22+ class LLMClient {
23+ private config : LLMClientConfig ;
24+ private authHeader : string ;
25+
26+ constructor ( ) {
27+ this . config = {
28+ baseUrl : process . env . LLM_BASE_URL || 'http://10.10.248.41' ,
29+ username : process . env . LLM_USERNAME || 'student1' ,
30+ password : process . env . LLM_PASSWORD || 'pass123' ,
31+ defaultModel : process . env . LLM_MODEL || 'llama3.1:8b' ,
32+ timeout : parseInt ( process . env . LLM_TIMEOUT || '30000' ) ,
33+ maxRetries : parseInt ( process . env . LLM_MAX_RETRIES || '3' )
34+ } ;
35+
36+ const credentials = Buffer . from ( `${ this . config . username } :${ this . config . password } ` ) . toString ( 'base64' ) ;
37+ this . authHeader = `Basic ${ credentials } ` ;
38+ }
39+
40+ /**
41+ * Generate response from LLM
42+ */
43+ async generateResponse ( prompt : string , options ?: LLMOptions ) : Promise < LLMResponse > {
44+ const request : LLMRequest = {
45+ model : this . config . defaultModel ,
46+ prompt : prompt . trim ( ) ,
47+ stream : false ,
48+ format : options ?. format || 'json' ,
49+ options : {
50+ temperature : options ?. temperature || 0.7 ,
51+ top_p : options ?. top_p || 0.9 ,
52+ num_predict : options ?. num_predict || 1000 ,
53+ ...options
54+ }
55+ } ;
56+
57+ return this . makeRequest ( '/api/generate' , 'POST' , request ) ;
58+ }
59+
60+ /**
61+ * List available models
62+ */
63+ async listAvailableModels ( ) : Promise < ModelsResponse > {
64+ return this . makeRequest ( '/api/tags' , 'GET' ) ;
65+ }
66+
67+ /**
68+ * Health check for the LLM service
69+ */
70+ async isHealthy ( ) : Promise < boolean > {
71+ try {
72+ const response = await this . makeRequest ( '/api/tags' , 'GET' ) ;
73+ return response && typeof response === 'object' ;
74+ } catch ( error ) {
75+ console . error ( 'LLM health check failed:' , error ) ;
76+ return false ;
77+ }
78+ }
79+
80+ /**
81+ * Test connection with a simple prompt
82+ */
83+ async testConnection ( ) : Promise < { success : boolean ; message : string } > {
84+ try {
85+ const response = await this . generateResponse ( 'Hello' , {
86+ temperature : 0.1 ,
87+ num_predict : 50
88+ } ) ;
89+
90+ if ( response . success && response . response ) {
91+ return {
92+ success : true ,
93+ message : 'Connection successful'
94+ } ;
95+ } else {
96+ return {
97+ success : false ,
98+ message : 'Invalid response format'
99+ } ;
100+ }
101+ } catch ( error ) {
102+ return {
103+ success : false ,
104+ message : error instanceof Error ? error . message : 'Unknown error'
105+ } ;
106+ }
107+ }
108+
109+
110+ private async makeRequest ( endpoint : string , method : 'GET' | 'POST' , body ?: any ) : Promise < any > {
111+ const url = `${ this . config . baseUrl . replace ( / \/ $ / , '' ) } ${ endpoint } ` ;
112+
113+ for ( let attempt = 1 ; attempt <= this . config . maxRetries ; attempt ++ ) {
114+ try {
115+ const controller = new AbortController ( ) ;
116+ const timeoutId = setTimeout ( ( ) => controller . abort ( ) , this . config . timeout ) ;
117+
118+ const requestInit : RequestInit = {
119+ method,
120+ headers : {
121+ 'Authorization' : this . authHeader ,
122+ 'Content-Type' : 'application/json' ,
123+ } ,
124+ signal : controller . signal ,
125+ } ;
126+
127+ if ( method === 'POST' && body ) {
128+ requestInit . body = JSON . stringify ( body ) ;
129+ }
130+
131+ const response = await fetch ( url , requestInit ) ;
132+ clearTimeout ( timeoutId ) ;
133+
134+
135+ if ( ! response . ok ) {
136+ await this . handleErrorResponse ( response , attempt ) ;
137+ }
138+
139+ // Parse JSON response
140+ try {
141+ return await response . json ( ) ;
142+ } catch ( parseError ) {
143+ throw new LLMParsingError (
144+ 'Failed to parse JSON response' ,
145+ parseError instanceof Error ? parseError : undefined
146+ ) ;
147+ }
148+
149+ } catch ( error ) {
150+ if ( attempt < this . config . maxRetries ) {
151+ console . log ( `got error, attempt ${ attempt } - got error ${ error } ` , ) ;
152+ await this . delay ( Math . pow ( 2 , attempt ) * 1000 ) ;
153+ continue ;
154+ } else {
155+ // Handle network/timeout errors
156+ if ( error instanceof DOMException && error . name === 'AbortError' ) {
157+ throw new LLMTimeoutError ( `Request timeout after ${ this . config . timeout } ms` ) ;
158+ }
159+
160+ if ( error instanceof TypeError && error . message . includes ( 'fetch' ) ) {
161+ throw new LLMConnectionError ( 'Failed to connect to LLM service' , error ) ;
162+ }
163+
164+ // Re-throw LLM-specific errors
165+ if ( error instanceof LLMServiceError ) {
166+ throw error ;
167+
168+ }
169+ throw new LLMServiceError (
170+ `Unexpected error: ${ error instanceof Error ? error . message : 'Unknown' } ` ,
171+ undefined ,
172+ error instanceof Error ? error : undefined
173+ ) ;
174+ }
175+ }
176+ }
177+
178+ throw new LLMServiceError ( 'Max retries exceeded' ) ;
179+ }
180+ private async handleErrorResponse ( response : Response , attempt : number ) {
181+ const errorText = await response . text ( ) . catch ( ( ) => 'No error details' ) ;
182+
183+ if ( response . status === 401 ) {
184+ throw new LLMAuthenticationError ( 'Authentication failed. Check credentials.' ) ;
185+ }
186+
187+ if ( response . status >= 500 && attempt < this . config . maxRetries ) {
188+ await this . delay ( Math . pow ( 2 , attempt ) * 1000 ) ;
189+ }
190+
191+ throw new LLMServiceError (
192+ `HTTP ${ response . status } : ${ errorText } ` ,
193+ response . status
194+ ) ;
195+ }
196+
197+ /**
198+ * Delay utility for retry backoff
199+ */
200+ private delay ( ms : number ) : Promise < void > {
201+ return new Promise ( resolve => setTimeout ( resolve , ms ) ) ;
202+ }
203+
204+ /**
205+ * Get current configuration (for debugging)
206+ */
207+ getConfig ( ) : Partial < LLMClientConfig > {
208+ return {
209+ baseUrl : this . config . baseUrl ,
210+ username : this . config . username ,
211+ defaultModel : this . config . defaultModel ,
212+ timeout : this . config . timeout ,
213+ maxRetries : this . config . maxRetries
214+ // Exclude password for security
215+ } ;
216+ }
217+ }
218+
// Module-level singleton: configuration is read from process.env exactly once,
// when this module is first imported. All importers share this instance.
export default new LLMClient();
0 commit comments