@@ -33,6 +33,7 @@ return {
3333 [" anthropic-version" ] = " 2023-06-01" ,
3434 [" anthropic-beta" ] = " prompt-caching-2024-07-31" ,
3535 },
36+ temp = {},
3637 handlers = {
3738 --- @param self CodeCompanion.Adapter
3839 --- @return boolean
@@ -41,6 +42,11 @@ return {
4142 self .parameters .stream = true
4243 end
4344
-- Opt in to the 128k output beta, appending to any existing
-- anthropic-beta header value rather than clobbering it.
-- FIX: the original `(header .. "," or "")` made the `or ""` branch
-- unreachable (`..` binds tighter than `or`) and would raise
-- "attempt to concatenate a nil value" if the header were unset.
if self.temp.extended_output then
  local beta = self.headers["anthropic-beta"]
  self.headers["anthropic-beta"] = (beta and (beta .. ",") or "") .. "output-128k-2025-02-19"
end
49+
4450 return true
4551 end ,
4652
@@ -50,6 +56,16 @@ return {
5056 --- @param messages table
5157 --- @return table
--- Build the request parameters, enabling extended thinking when configured.
--- FIX: previously `extended_thinking` was checked twice, and temperature
--- was forced to 1 even when `thinking_budget` was nil — in which case no
--- thinking block was sent, so sampling was silently changed for nothing.
--- Temperature is now only pinned when thinking is actually enabled, as
--- the Anthropic API requires.
---@param self CodeCompanion.Adapter
---@param params table
---@param messages table
---@return table
form_parameters = function(self, params, messages)
  if self.temp.extended_thinking and self.temp.thinking_budget then
    params.thinking = {
      type = "enabled",
      budget_tokens = self.temp.thinking_budget,
    }
    -- The API mandates temperature = 1 whenever extended thinking is on
    params.temperature = 1
  end

  return params
end,
5571
@@ -175,9 +191,16 @@ return {
175191 if json .type == " message_start" then
176192 output .role = json .message .role
177193 output .content = " "
194+ elseif json .type == " content_block_start" then
195+ if json .content_block .type == " thinking" then
196+ output .reasoning = " "
197+ end
178198 elseif json .type == " content_block_delta" then
179- output .role = nil
180- output .content = json .delta .text
199+ if json .delta .type == " thinking_delta" then
200+ output .reasoning = json .delta .thinking
201+ else
202+ output .content = json .delta .text
203+ end
181204 elseif json .type == " message" then
182205 output .role = json .role
183206 output .content = json .content [1 ].text
@@ -235,26 +258,70 @@ return {
235258 desc = " The model that will complete your prompt. See https://docs.anthropic.com/claude/docs/models-overview for additional details and options." ,
236259 default = " claude-3-7-sonnet-20250219" ,
237260 choices = {
238- " claude-3-7-sonnet-20250219" ,
261+ [ " claude-3-7-sonnet-20250219" ] = { opts = { can_reason = true } } ,
239262 " claude-3-5-sonnet-20241022" ,
240263 " claude-3-5-haiku-20241022" ,
241264 " claude-3-opus-20240229" ,
242265 " claude-2.1" ,
243266 },
244267 },
245- max_tokens = {
extended_output = {
  order = 2,
  mapping = "temp",
  type = "boolean",
  optional = true,
  default = false,
  desc = "Enable larger output context (128k tokens). Only available with claude-3-7-sonnet-20250219.",
  -- Enforce the model restriction stated in `desc`, mirroring the
  -- gating used by the extended_thinking/thinking_budget siblings.
  condition = function(schema)
    local model = schema.model.default
    if schema.model.choices[model] and schema.model.choices[model].opts then
      return schema.model.choices[model].opts.can_reason
    end
  end,
},
276+ extended_thinking = {
277+ order = 3 ,
278+ mapping = " temp" ,
279+ type = " boolean" ,
280+ optional = true ,
281+ default = true ,
282+ desc = " Enable extended thinking for more thorough reasoning. Requires thinking_budget to be set." ,
283+ condition = function (schema )
284+ local model = schema .model .default
285+ if schema .model .choices [model ] and schema .model .choices [model ].opts then
286+ return schema .model .choices [model ].opts .can_reason
287+ end
288+ end ,
289+ },
290+ thinking_budget = {
291+ order = 4 ,
292+ mapping = " temp" ,
293+ type = " number" ,
294+ optional = true ,
295+ default = 16000 ,
296+ desc = " The maximum number of tokens to use for thinking when extended_thinking is enabled. Must be less than max_tokens." ,
297+ validate = function (n )
298+ return n > 0 , " Must be greater than 0"
299+ end ,
300+ condition = function (schema )
301+ local model = schema .model .default
302+ if schema .model .choices [model ] and schema .model .choices [model ].opts then
303+ return schema .model .choices [model ].opts .can_reason
304+ end
305+ end ,
306+ },
max_tokens = {
  order = 5,
  mapping = "parameters",
  type = "number",
  optional = true,
  -- When extended thinking defaults on, leave headroom above the
  -- thinking budget (budget_tokens must be strictly less than
  -- max_tokens). FIX: guard self.schema.thinking_budget before
  -- indexing it — it can be absent when the schema is pruned or
  -- customized, which would nil-index here.
  default = function(self)
    local thinking = self.schema.extended_thinking
    local budget = self.schema.thinking_budget
    if thinking and thinking.default and budget and budget.default then
      return budget.default + 1000
    end
    return 4096
  end,
  desc = "The maximum number of tokens to generate before stopping. This parameter only specifies the absolute maximum number of tokens to generate. Different models have different maximum values for this parameter.",
  validate = function(n)
    return n > 0 and n <= 128000, "Must be between 0 and 128000"
  end,
},
256323 temperature = {
257- order = 3 ,
324+ order = 6 ,
258325 mapping = " parameters" ,
259326 type = " number" ,
260327 optional = true ,
@@ -265,7 +332,7 @@ return {
265332 end ,
266333 },
267334 top_p = {
268- order = 4 ,
335+ order = 7 ,
269336 mapping = " parameters" ,
270337 type = " number" ,
271338 optional = true ,
@@ -276,7 +343,7 @@ return {
276343 end ,
277344 },
278345 top_k = {
279- order = 5 ,
346+ order = 8 ,
280347 mapping = " parameters" ,
281348 type = " number" ,
282349 optional = true ,
@@ -287,7 +354,7 @@ return {
287354 end ,
288355 },
289356 stop_sequences = {
290- order = 6 ,
357+ order = 9 ,
291358 mapping = " parameters" ,
292359 type = " list" ,
293360 optional = true ,
0 commit comments