@@ -13,21 +13,33 @@ import ai.koog.agents.core.environment.ReceivedToolResult
1313import ai.koog.agents.core.environment.result
1414import ai.koog.agents.core.feature.handler.tool.ToolCallCompletedContext
1515import ai.koog.agents.core.feature.handler.tool.ToolCallStartingContext
16+ import ai.koog.agents.core.tools.ToolDescriptor
1617import ai.koog.agents.core.tools.ToolRegistry
1718import ai.koog.agents.ext.tool.ExitTool
1819import ai.koog.agents.ext.tool.shell.ShellCommandConfirmation
1920import ai.koog.agents.features.eventHandler.feature.handleEvents
2021import ai.koog.agents.features.tokenizer.feature.MessageTokenizer
2122import ai.koog.agents.features.tokenizer.feature.tokenizer
23+ import ai.koog.prompt.dsl.Prompt
2224import ai.koog.prompt.dsl.prompt
25+ import ai.koog.prompt.executor.clients.anthropic.AnthropicParams
26+ import ai.koog.prompt.executor.clients.anthropic.models.AnthropicThinking
2327import ai.koog.prompt.executor.clients.LLMClient
28+ import ai.koog.prompt.executor.clients.openai.OpenAIResponsesParams
29+ import ai.koog.prompt.executor.clients.openai.base.models.ReasoningEffort
30+ import ai.koog.prompt.executor.clients.openai.models.ReasoningConfig
31+ import ai.koog.prompt.executor.clients.openai.models.ReasoningSummary
2432import ai.koog.prompt.executor.model.PromptExecutor
33+ import ai.koog.prompt.llm.LLMCapability
2534import ai.koog.prompt.llm.LLMProvider
35+ import ai.koog.prompt.llm.LLModel
2636import ai.koog.prompt.message.Message
37+ import ai.koog.prompt.params.LLMParams
2738import ai.koog.prompt.tokenizer.Tokenizer
2839import com.intellij.openapi.components.service
2940import com.intellij.openapi.project.Project
3041import ee.carlrobert.codegpt.EncodingManager
42+ import ee.carlrobert.codegpt.agent.clients.CustomOpenAILLMClient
3143import ee.carlrobert.codegpt.agent.clients.RetryingPromptExecutor
3244import ee.carlrobert.codegpt.agent.credits.extractCreditsSnapshot
3345import ee.carlrobert.codegpt.agent.tools.*
@@ -46,6 +58,8 @@ import kotlin.time.Duration.Companion.seconds
4658object AgentFactory {
4759
4860 private const val MAX_AGENT_ITERATIONS = 250
61+ private const val ANTHROPIC_MIN_THINKING_BUDGET = 512
62+ private const val ANTHROPIC_DEFAULT_THINKING_BUDGET = 2_048
4963
5064 fun createAgent (
5165 agentType : AgentType ,
@@ -170,18 +184,111 @@ object AgentFactory {
170184 featureType : FeatureType = FeatureType .AGENT
171185 ): PromptExecutor {
172186 val llmClient = LLMClientFactory .createClient(provider, featureType)
173- return createRetryingExecutor(llmClient, events)
174- }
175-
176- private fun createRetryingExecutor (client : LLMClient , events : AgentEvents ? ): PromptExecutor {
177187 val policy = RetryingPromptExecutor .RetryPolicy (
178188 maxAttempts = 5 ,
179189 initialDelay = 1 .seconds,
180190 maxDelay = 30 .seconds,
181191 backoffMultiplier = 2.0 ,
182192 jitterFactor = 0.1
183193 )
184- return RetryingPromptExecutor .fromClient(client, policy, events)
194+ return createRetryingExecutor(llmClient, policy, events)
195+ }
196+
internal fun createRetryingExecutor(
    client: LLMClient,
    policy: RetryingPromptExecutor.RetryPolicy,
    events: AgentEvents?
): PromptExecutor {
    // Delegate that retries transient failures according to [policy].
    val delegate = RetryingPromptExecutor.fromClient(client, policy, events)
    // Wrap the delegate so provider-specific reasoning parameters are injected
    // into every executed prompt; moderation, model listing and close() are
    // forwarded unchanged.
    return object : PromptExecutor {
        override fun executeStreaming(
            prompt: Prompt,
            model: LLModel,
            tools: List<ToolDescriptor>
        ) = delegate.executeStreaming(prompt.withReasoningParams(model), model, tools)

        override suspend fun execute(
            prompt: Prompt,
            model: LLModel,
            tools: List<ToolDescriptor>
        ) = delegate.execute(prompt.withReasoningParams(model), model, tools)

        override suspend fun moderate(prompt: Prompt, model: LLModel) =
            delegate.moderate(prompt, model)

        override suspend fun models() = delegate.models()

        override fun close() = delegate.close()
    }
}
224+
/**
 * Returns a copy of this prompt whose params carry provider-specific
 * reasoning configuration: OpenAI (and OpenAI-compatible custom providers
 * that support the Responses endpoint) get reasoning params, Anthropic gets
 * extended-thinking params, and all other providers are left untouched.
 */
private fun Prompt.withReasoningParams(model: LLModel): Prompt {
    val resolved = when {
        model.provider == LLMProvider.OpenAI ->
            params.withOpenAIReasoning()

        model.provider == CustomOpenAILLMClient.CustomOpenAI &&
            model.supports(LLMCapability.OpenAIEndpoint.Responses) ->
            params.withOpenAIReasoning()

        model.provider == LLMProvider.Anthropic ->
            params.withAnthropicReasoning()

        else -> params
    }
    return withParams(resolved)
}
240+
/**
 * Ensures the params are [OpenAIResponsesParams] (converting while keeping
 * all shared fields) and that a reasoning configuration is present,
 * defaulting to medium effort with automatic summaries. An explicit
 * reasoning config already set by the caller is preserved as-is.
 */
private fun LLMParams.withOpenAIReasoning(): LLMParams {
    val responsesParams = this as? OpenAIResponsesParams
        ?: OpenAIResponsesParams(
            temperature = temperature,
            maxTokens = maxTokens,
            numberOfChoices = numberOfChoices,
            speculation = speculation,
            schema = schema,
            toolChoice = toolChoice,
            user = user,
            additionalProperties = additionalProperties
        )
    val reasoning = responsesParams.reasoning
        ?: ReasoningConfig(effort = ReasoningEffort.MEDIUM, summary = ReasoningSummary.AUTO)
    return responsesParams.copy(reasoning = reasoning)
}
262+
/**
 * Ensures the params are [AnthropicParams] (converting while keeping all
 * shared fields) and, when no thinking config was supplied and the token
 * limit leaves enough headroom, enables extended thinking with a budget
 * derived from [resolveAnthropicThinkingBudget].
 */
private fun LLMParams.withAnthropicReasoning(): LLMParams {
    val anthropicParams = this as? AnthropicParams
        ?: AnthropicParams(
            temperature = temperature,
            maxTokens = maxTokens,
            numberOfChoices = numberOfChoices,
            speculation = speculation,
            schema = schema,
            toolChoice = toolChoice,
            user = user,
            additionalProperties = additionalProperties
        )

    // Respect an explicit thinking configuration supplied by the caller.
    if (anthropicParams.thinking != null) return anthropicParams

    val budget = resolveAnthropicThinkingBudget(anthropicParams.maxTokens)
        ?: return anthropicParams
    return anthropicParams.copy(thinking = AnthropicThinking.Enabled(budgetTokens = budget))
}
283+
/**
 * Derives an extended-thinking token budget from the request's max-token
 * limit. Returns null when the limit leaves no room above the Anthropic
 * minimum thinking budget (the caller then skips enabling thinking);
 * otherwise allocates half of the limit, clamped to the
 * [ANTHROPIC_MIN_THINKING_BUDGET, ANTHROPIC_DEFAULT_THINKING_BUDGET] range.
 * A null [maxTokens] falls back to the default budget as the limit.
 */
private fun resolveAnthropicThinkingBudget(maxTokens: Int?): Int? {
    val limit = maxTokens ?: ANTHROPIC_DEFAULT_THINKING_BUDGET
    if (limit <= ANTHROPIC_MIN_THINKING_BUDGET) {
        return null
    }
    // coerceIn is the idiomatic single-call form of coerceAtLeast + coerceAtMost.
    return (limit / 2).coerceIn(ANTHROPIC_MIN_THINKING_BUDGET, ANTHROPIC_DEFAULT_THINKING_BUDGET)
}
186293
187294 private fun createGeneralPurposeAgent (
0 commit comments