
 namespace DevProxy.Abstractions.LanguageModel;

-public class LMStudioLanguageModelClient(LanguageModelConfiguration? configuration, ILogger logger) : ILanguageModelClient
+public class OpenAILanguageModelClient(LanguageModelConfiguration? configuration, ILogger logger) : ILanguageModelClient
 {
     private readonly LanguageModelConfiguration? _configuration = configuration;
     private readonly ILogger _logger = logger;
     private bool? _lmAvailable;
-    private readonly Dictionary<string, OpenAICompletionResponse> _cacheCompletion = [];
     private readonly Dictionary<ILanguageModelChatCompletionMessage[], OpenAIChatCompletionResponse> _cacheChatCompletion = [];

     public async Task<bool> IsEnabledAsync()
@@ -29,6 +28,8 @@ public async Task<bool> IsEnabledAsync()

     private async Task<bool> IsEnabledInternalAsync()
     {
+        using var scope = _logger.BeginScope(nameof(OpenAILanguageModelClient));
+
         if (_configuration is null || !_configuration.Enabled)
         {
             return false;
@@ -50,20 +51,14 @@ private async Task<bool> IsEnabledInternalAsync()

         try
         {
-            // check if lm is on
-            using var client = new HttpClient();
-            var response = await client.GetAsync($"{_configuration.Url}/v1/models");
-            _logger.LogDebug("Response: {response}", response.StatusCode);
-
-            if (!response.IsSuccessStatusCode)
+            var testCompletion = await GenerateChatCompletionInternalAsync([new()
             {
-                return false;
-            }
-
-            var testCompletion = await GenerateCompletionInternalAsync("Are you there? Reply with a yes or no.");
-            if (testCompletion?.Error is not null)
+                Content = "Are you there? Reply with a yes or no.",
+                Role = "user"
+            }]);
+            if (testCompletion?.ErrorMessage is not null)
             {
-                _logger.LogError("Error: {error}. Param: {param}", testCompletion.Error.Message, testCompletion.Error.Param);
+                _logger.LogError("Error: {error}", testCompletion.ErrorMessage);
                 return false;
             }

@@ -78,90 +73,41 @@ private async Task<bool> IsEnabledInternalAsync()

     public async Task<ILanguageModelCompletionResponse?> GenerateCompletionAsync(string prompt, CompletionOptions? options = null)
     {
-        using var scope = _logger.BeginScope(nameof(LMStudioLanguageModelClient));
-
-        if (_configuration is null)
-        {
-            return null;
-        }
-
-        if (!_lmAvailable.HasValue)
-        {
-            _logger.LogError("Language model availability is not checked. Call {isEnabled} first.", nameof(IsEnabledAsync));
-            return null;
-        }
-
-        if (!_lmAvailable.Value)
-        {
-            return null;
-        }
-
-        if (_configuration.CacheResponses && _cacheCompletion.TryGetValue(prompt, out var cachedResponse))
-        {
-            _logger.LogDebug("Returning cached response for prompt: {prompt}", prompt);
-            return cachedResponse;
-        }
-
-        var response = await GenerateCompletionInternalAsync(prompt, options);
+        var response = await GenerateChatCompletionAsync([new OpenAIChatCompletionMessage() { Content = prompt, Role = "user" }], options);
         if (response == null)
         {
             return null;
         }
-        if (response.Error is not null)
+        if (response.ErrorMessage is not null)
         {
-            _logger.LogError("Error: {error}. Param: {param}", response.Error.Message, response.Error.Param);
+            _logger.LogError("Error: {error}", response.ErrorMessage);
             return null;
         }
-        else
-        {
-            if (_configuration.CacheResponses && response.Response is not null)
-            {
-                _cacheCompletion[prompt] = response;
-            }
+        var openAIResponse = (OpenAIChatCompletionResponse)response;

-            return response;
-        }
-    }
-
-    private async Task<OpenAICompletionResponse?> GenerateCompletionInternalAsync(string prompt, CompletionOptions? options = null)
-    {
-        Debug.Assert(_configuration != null, "Configuration is null");
-
-        try
+        return new OpenAICompletionResponse
         {
-            using var client = new HttpClient();
-            var url = $"{_configuration.Url}/v1/completions";
-            _logger.LogDebug("Requesting completion. Prompt: {prompt}", prompt);
-
-            var response = await client.PostAsJsonAsync(url,
-                new
-                {
-                    prompt,
-                    model = _configuration.Model,
-                    stream = false,
-                    temperature = options?.Temperature ?? 0.8,
-                }
-            );
-            _logger.LogDebug("Response: {response}", response.StatusCode);
-
-            var res = await response.Content.ReadFromJsonAsync<OpenAICompletionResponse>();
-            if (res is null)
+            Choices = openAIResponse.Choices?.Select(c => new OpenAICompletionResponseChoice
             {
-                return res;
-            }
-            res.RequestUrl = url;
-            return res;
-        }
-        catch (Exception ex)
-        {
-            _logger.LogError(ex, "Failed to generate completion");
-            return null;
-        }
+                ContentFilterResults = c.ContentFilterResults,
+                FinishReason = c.FinishReason,
+                Index = c.Index,
+                LogProbabilities = c.LogProbabilities,
+                Text = c.Message.Content
+            }).ToArray(),
+            Created = openAIResponse.Created,
+            Error = openAIResponse.Error,
+            Id = openAIResponse.Id,
+            Model = openAIResponse.Model,
+            Object = openAIResponse.Object,
+            PromptFilterResults = openAIResponse.PromptFilterResults,
+            Usage = openAIResponse.Usage,
+        };
     }

-    public async Task<ILanguageModelCompletionResponse?> GenerateChatCompletionAsync(ILanguageModelChatCompletionMessage[] messages)
+    public async Task<ILanguageModelCompletionResponse?> GenerateChatCompletionAsync(ILanguageModelChatCompletionMessage[] messages, CompletionOptions? options = null)
     {
-        using var scope = _logger.BeginScope(nameof(LMStudioLanguageModelClient));
+        using var scope = _logger.BeginScope(nameof(OpenAILanguageModelClient));

         if (_configuration is null)
         {
@@ -185,14 +131,14 @@ private async Task<bool> IsEnabledInternalAsync()
             return cachedResponse;
         }

-        var response = await GenerateChatCompletionInternalAsync(messages);
+        var response = await GenerateChatCompletionInternalAsync([.. messages.Select(m => (OpenAIChatCompletionMessage)m)], options);
         if (response == null)
         {
             return null;
         }
         if (response.Error is not null)
         {
-            _logger.LogError("Error: {error}. Param: {param}", response.Error.Message, response.Error.Param);
+            _logger.LogError("Error: {error}. Code: {code}", response.Error.Message, response.Error.Code);
             return null;
         }
         else
@@ -206,24 +152,25 @@ private async Task<bool> IsEnabledInternalAsync()
         }
     }

-    private async Task<OpenAIChatCompletionResponse?> GenerateChatCompletionInternalAsync(ILanguageModelChatCompletionMessage[] messages)
+    private async Task<OpenAIChatCompletionResponse?> GenerateChatCompletionInternalAsync(OpenAIChatCompletionMessage[] messages, CompletionOptions? options = null)
     {
         Debug.Assert(_configuration != null, "Configuration is null");

         try
         {
             using var client = new HttpClient();
-            var url = $"{_configuration.Url}/v1/chat/completions";
+            var url = $"{_configuration.Url}/chat/completions";
             _logger.LogDebug("Requesting chat completion. Message: {lastMessage}", messages.Last().Content);

-            var response = await client.PostAsJsonAsync(url,
-                new
-                {
-                    messages,
-                    model = _configuration.Model,
-                    stream = false
-                }
-            );
+            var payload = new OpenAIChatCompletionRequest
+            {
+                Messages = messages,
+                Model = _configuration.Model,
+                Stream = false,
+                Temperature = options?.Temperature
+            };
+
+            var response = await client.PostAsJsonAsync(url, payload);
             _logger.LogDebug("Response: {response}", response.StatusCode);

             var res = await response.Content.ReadFromJsonAsync<OpenAIChatCompletionResponse>();
@@ -243,7 +190,7 @@ private async Task<bool> IsEnabledInternalAsync()
     }
 }

-internal static class CacheChatCompletionExtensions
+internal static class OpenAICacheChatCompletionExtensions
 {
     public static OpenAIChatCompletionMessage[]? GetKey(
         this Dictionary<OpenAIChatCompletionMessage[], OpenAIChatCompletionResponse> cache,
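Note: the `OpenAIChatCompletionRequest` type that the new code serializes is referenced but not defined in these hunks. A minimal sketch of the shape implied by the initializer in `GenerateChatCompletionInternalAsync`, assuming snake_case wire names for an OpenAI-style `/chat/completions` endpoint; the JSON attribute names are an assumption, not taken from this commit:

```csharp
using System.Text.Json.Serialization;

// Assumed shape of OpenAIChatCompletionRequest, inferred from the object
// initializer in GenerateChatCompletionInternalAsync. The wire names are a
// guess at what an OpenAI-compatible endpoint expects.
internal sealed class OpenAIChatCompletionRequest
{
    [JsonPropertyName("messages")]
    public OpenAIChatCompletionMessage[] Messages { get; set; } = [];

    [JsonPropertyName("model")]
    public string? Model { get; set; }

    [JsonPropertyName("stream")]
    public bool Stream { get; set; }

    [JsonPropertyName("temperature")]
    public double? Temperature { get; set; }
}
```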
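For reference, a hypothetical end-to-end usage sketch. It assumes `LanguageModelConfiguration` and `CompletionOptions` are settable via object initializers and that the response exposes the `Response` property referenced in the removed caching code; the URL and model values are placeholders. Note that with the endpoint change from `/v1/chat/completions` to `/chat/completions`, the configured `Url` is now expected to carry the `/v1` segment (or whatever base path the OpenAI-compatible server uses).

```csharp
using DevProxy.Abstractions.LanguageModel;
using Microsoft.Extensions.Logging;

// Hypothetical caller (not part of this commit). Per the guard in
// GenerateChatCompletionAsync, IsEnabledAsync() must run first so that
// _lmAvailable is populated before any completion is requested.
using var loggerFactory = LoggerFactory.Create(builder => builder.AddConsole());
var logger = loggerFactory.CreateLogger("LanguageModel");

var configuration = new LanguageModelConfiguration
{
    Enabled = true,
    Url = "http://localhost:1234/v1", // assumed OpenAI-compatible base URL, /v1 included
    Model = "local-model",            // placeholder model id
    CacheResponses = true
};

var client = new OpenAILanguageModelClient(configuration, logger);
if (await client.IsEnabledAsync())
{
    var response = await client.GenerateChatCompletionAsync(
        [new OpenAIChatCompletionMessage { Content = "Say hello.", Role = "user" }],
        new CompletionOptions { Temperature = 0.2 });
    Console.WriteLine(response?.Response);
}
```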