Skip to content

Commit 625741a

Browse files
committed
Add Extended Thinking and Extended Output for Claude 3.7
1 parent 82cacda commit 625741a

File tree

3 files changed

+82
-18
lines changed

3 files changed

+82
-18
lines changed

doc/codecompanion.txt

Lines changed: 17 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
*codecompanion.txt* For NVIM v0.10.0 Last change: 2025 February 20
1+
*codecompanion.txt* For NVIM v0.10.0 Last change: 2025 February 25
22

33
==============================================================================
44
Table of Contents *codecompanion-table-of-contents*
@@ -646,7 +646,7 @@ USER CONTRIBUTED ADAPTERS ~
646646

647647
Thanks to the community for building and supporting the following adapters:
648648

649-
- Venice.ai <https://github.com/olimorris/codecompanion.nvim/discussions/775>
649+
- Venice.ai <https://github.com/olimorris/codecompanion.nvim/discussions/972>
650650
- Fireworks.ai <https://github.com/olimorris/codecompanion.nvim/discussions/693>
651651

652652
The section of the discussion forums which is dedicated to user created
@@ -1467,17 +1467,26 @@ nearest codeblock.
14671467

14681468
AUTOMATICALLY UPDATE A BUFFER ~
14691469

1470-
The |codecompanion-usage-chat-buffer-agents-editor| tool enables an LLM to
1471-
modify code in a Neovim buffer. This is especially useful if you do not wish to
1472-
manually apply an LLM’s suggestions yourself. Simply tag it in the chat
1473-
buffer with `@editor`.
1470+
The |codecompanion-usage-chat-buffer-agents-editor| tool, combined with the
1471+
`#buffer` variable, enables an LLM to modify code in a Neovim buffer. This is
1472+
especially useful if you do not wish to manually apply an LLM’s suggestions
1473+
yourself. Simply tag it in the chat buffer with `@editor`. To ensure the LLM
1474+
can consistently see the changes that it’s applied, be sure to
1475+
|codecompanion-usage-chat-buffer--references| your buffers.
14741476

14751477

14761478
RUN TESTS FROM THE CHAT BUFFER ~
14771479

14781480
The |codecompanion-usage-chat-buffer-agents-cmd-runner| tool enables an LLM to
14791481
execute commands on your machine. This can be useful if you wish the LLM to run
1480-
a test suite on your behalf and give insight on failing cases.
1482+
a test suite on your behalf and give insight on failing cases. Simply tag the
1483+
`@cmd_runner` in the chat buffer and ask it to run your tests with a suitable
1484+
command e.g. `pytest`.
1485+
1486+
1487+
NAVIGATING BETWEEN RESPONSES IN THE CHAT BUFFER ~
1488+
1489+
You can quickly move between responses in the chat buffer using `[[` or `]]`.
14811490

14821491

14831492
QUICKLY ACCESSING A CHAT BUFFER ~
@@ -2503,7 +2512,7 @@ OpenAI adapter.
25032512
as a great reference to understand how they’re working with the output of the
25042513
API
25052514

2506-
OPENAI'S API OUTPUT
2515+
OPENAIS API OUTPUT
25072516

25082517
If we reference the OpenAI documentation
25092518
<https://platform.openai.com/docs/guides/text-generation/chat-completions-api>

lua/codecompanion/adapters/anthropic.lua

Lines changed: 63 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,11 @@ return {
4141
self.parameters.stream = true
4242
end
4343

44+
-- Add the extended output header if enabled
45+
if self.parameters.extended_output then
46+
self.headers["anthropic-beta"] = "prompt-caching-2024-07-31,output-128k-2025-02-19"
47+
end
48+
4449
return true
4550
end,
4651

@@ -50,6 +55,22 @@ return {
5055
---@param messages table
5156
---@return table
5257
form_parameters = function(self, params, messages)
58+
-- Add thinking configuration if extended_thinking is enabled
59+
if params.extended_thinking and params.thinking_budget then
60+
params.thinking = {
61+
type = "enabled",
62+
budget_tokens = params.thinking_budget
63+
}
64+
end
65+
if params.extended_thinking then
66+
params.temperature = 1
67+
end
68+
69+
-- Remove our custom parameters that aren't part of the Anthropic API
70+
params.extended_thinking = nil
71+
params.extended_output = nil
72+
params.thinking_budget = nil
73+
5374
return params
5475
end,
5576

@@ -175,9 +196,16 @@ return {
175196
if json.type == "message_start" then
176197
output.role = json.message.role
177198
output.content = ""
199+
elseif json.type == "content_block_start" then
200+
if json.content_block.type == "thinking" then
201+
output.reasoning = ""
202+
end
178203
elseif json.type == "content_block_delta" then
179-
output.role = nil
180-
output.content = json.delta.text
204+
if json.delta.type == "thinking_delta" then
205+
output.reasoning = json.delta.thinking
206+
else
207+
output.content = json.delta.text
208+
end
181209
elseif json.type == "message" then
182210
output.role = json.role
183211
output.content = json.content[1].text
@@ -242,19 +270,46 @@ return {
242270
"claude-2.1",
243271
},
244272
},
245-
max_tokens = {
273+
extended_output = {
246274
order = 2,
247275
mapping = "parameters",
276+
type = "boolean",
277+
optional = true,
278+
default = false,
279+
desc = "Enable larger output context (128k tokens). Only available with claude-3-7-sonnet-20250219.",
280+
},
281+
extended_thinking = {
282+
order = 3,
283+
mapping = "parameters",
284+
type = "boolean",
285+
optional = true,
286+
default = false,
287+
desc = "Enable extended thinking for more thorough reasoning. Requires thinking_budget to be set.",
288+
},
289+
thinking_budget = {
290+
order = 4,
291+
mapping = "parameters",
292+
type = "number",
293+
optional = true,
294+
default = 16000,
295+
desc = "The maximum number of tokens to use for thinking when extended_thinking is enabled. Must be less than max_tokens.",
296+
validate = function(n)
297+
return n > 0, "Must be greater than 0"
298+
end,
299+
},
300+
max_tokens = {
301+
order = 5,
302+
mapping = "parameters",
248303
type = "number",
249304
optional = true,
250305
default = 4096,
251306
desc = "The maximum number of tokens to generate before stopping. This parameter only specifies the absolute maximum number of tokens to generate. Different models have different maximum values for this parameter.",
252307
validate = function(n)
253-
return n > 0 and n <= 8192, "Must be between 0 and 8192"
308+
return n > 0 and n <= 32768, "Must be between 0 and 32768"
254309
end,
255310
},
256311
temperature = {
257-
order = 3,
312+
order = 6,
258313
mapping = "parameters",
259314
type = "number",
260315
optional = true,
@@ -265,7 +320,7 @@ return {
265320
end,
266321
},
267322
top_p = {
268-
order = 4,
323+
order = 7,
269324
mapping = "parameters",
270325
type = "number",
271326
optional = true,
@@ -276,7 +331,7 @@ return {
276331
end,
277332
},
278333
top_k = {
279-
order = 5,
334+
order = 8,
280335
mapping = "parameters",
281336
type = "number",
282337
optional = true,
@@ -287,7 +342,7 @@ return {
287342
end,
288343
},
289344
stop_sequences = {
290-
order = 6,
345+
order = 9,
291346
mapping = "parameters",
292347
type = "list",
293348
optional = true,

lua/codecompanion/strategies/chat/init.lua

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1034,11 +1034,11 @@ function Chat:add_buf_message(data, opts)
10341034
-- Add data to the chat buffer
10351035
local function append_data()
10361036
if data.reasoning then
1037-
has_been_reasoning = true
1038-
if new_response then
1037+
if not has_been_reasoning then
10391038
table.insert(lines, "### Reasoning")
10401039
table.insert(lines, "")
10411040
end
1041+
has_been_reasoning = true
10421042
write(data.reasoning)
10431043
end
10441044
if data.content then

0 commit comments

Comments
 (0)