3 files changed: +22 -10
packages/types/src/providers

@@ -10,6 +10,7 @@ export type GroqModelId =
 	| "qwen-qwq-32b"
 	| "qwen/qwen3-32b"
 	| "deepseek-r1-distill-llama-70b"
+	| "moonshotai/kimi-k2-instruct"
 
 export const groqDefaultModelId: GroqModelId = "llama-3.3-70b-versatile" // Defaulting to Llama3 70B Versatile
 
@@ -87,4 +88,13 @@ export const groqModels = {
 		outputPrice: 0.99,
 		description: "DeepSeek R1 Distill Llama 70B model, 128K context.",
 	},
+	"moonshotai/kimi-k2-instruct": {
+		maxTokens: 131072,
+		contextWindow: 131072,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 1.0,
+		outputPrice: 3.0,
+		description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context.",
+	},
 } as const satisfies Record<string, ModelInfo>
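For orientation, here is a minimal sketch of the shape this entry must satisfy, inferred only from the fields the diff exercises (the real ModelInfo interface in packages/types almost certainly has more members, and the per-million-token pricing unit is an assumption):

```typescript
// Abbreviated ModelInfo, inferred from the fields used in this diff.
interface ModelInfoSketch {
	maxTokens?: number
	contextWindow: number
	supportsImages: boolean
	supportsPromptCache: boolean
	inputPrice?: number // assumed: USD per million input tokens
	outputPrice?: number // assumed: USD per million output tokens
	description?: string
}

// The new entry declares maxTokens equal to its context window (131072).
// That is exactly the case the getModelMaxOutputTokens change later in this
// PR special-cases, so the model stops reserving its whole window for output.
const kimiK2Sketch: ModelInfoSketch = {
	maxTokens: 131072,
	contextWindow: 131072,
	supportsImages: false,
	supportsPromptCache: false,
	inputPrice: 1.0,
	outputPrice: 3.0,
	description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context.",
}
```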
@@ -41,6 +41,7 @@ import { ClineAskResponse } from "../../shared/WebviewMessage"
 import { defaultModeSlug } from "../../shared/modes"
 import { DiffStrategy } from "../../shared/tools"
 import { EXPERIMENT_IDS, experiments } from "../../shared/experiments"
+import { getModelMaxOutputTokens } from "../../shared/api"
 
 // services
 import { UrlContentFetcher } from "../../services/browser/UrlContentFetcher"
@@ -1716,15 +1717,13 @@ export class Task extends EventEmitter<ClineEvents> {
 		const { contextTokens } = this.getTokenUsage()
 
 		if (contextTokens) {
-			// Default max tokens value for thinking models when no specific
-			// value is set.
-			const DEFAULT_THINKING_MODEL_MAX_TOKENS = 16_384
-
 			const modelInfo = this.api.getModel().info
 
-			const maxTokens = modelInfo.supportsReasoningBudget
-				? this.apiConfiguration.modelMaxTokens || DEFAULT_THINKING_MODEL_MAX_TOKENS
-				: modelInfo.maxTokens
+			const maxTokens = getModelMaxOutputTokens({
+				modelId: this.api.getModel().id,
+				model: modelInfo,
+				settings: this.apiConfiguration,
+			})
 
 			const contextWindow = modelInfo.contextWindow
 
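For reference, the contract this call site implies, as a hedged sketch (the argument names come from the hunk above; the settings shape, the optional return type, and the role of modelId are assumptions, since the real declaration in ../../shared/api is not part of this diff):

```typescript
// Sketch of the helper's signature, inferred from the call site above; the
// real declaration lives in ../../shared/api and is not shown in this diff.
declare function getModelMaxOutputTokens(args: {
	modelId: string // presumably lets the helper vary behavior by provider/model id
	model: { maxTokens?: number; contextWindow: number; supportsReasoningBudget?: boolean }
	settings?: { modelMaxTokens?: number } // assumed subset of the API configuration
}): number | undefined

// With the helper in charge, DEFAULT_THINKING_MODEL_MAX_TOKENS and the
// supportsReasoningBudget ternary no longer live in Task, so a fix to the
// shared fallback (next file) applies to this call site automatically.
```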
@@ -82,9 +82,12 @@ export const getModelMaxOutputTokens = ({
 		return ANTHROPIC_DEFAULT_MAX_TOKENS
 	}
 
-	// If maxTokens is 0 or undefined, fall back to 20% of context window
-	// This matches the sliding window logic
-	return model.maxTokens || Math.ceil(model.contextWindow * 0.2)
+	// If maxTokens is 0, undefined, or the full context window, fall back to 20% of the context window
+	if (model.maxTokens && model.maxTokens !== model.contextWindow) {
+		return model.maxTokens
+	} else {
+		return Math.ceil(model.contextWindow * 0.2)
+	}
 }
 
 // GetModelsOptions
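The practical effect, as a worked sketch that re-implements only the changed fallback (the real function's earlier guard clauses, such as the Anthropic default above, are elided):

```typescript
// Illustrative re-implementation of just the new fallback branch.
function maxOutputTokensSketch(model: { maxTokens?: number; contextWindow: number }): number {
	if (model.maxTokens && model.maxTokens !== model.contextWindow) {
		return model.maxTokens
	}
	return Math.ceil(model.contextWindow * 0.2)
}

// Before this PR, "moonshotai/kimi-k2-instruct" (maxTokens === contextWindow === 131072)
// would have reported its entire context window as output budget, leaving no
// room for the prompt. With the new check it falls back to 20% of the window:
console.log(maxOutputTokensSketch({ maxTokens: 131072, contextWindow: 131072 }))
// => 26215 (Math.ceil(131072 * 0.2))

// A model with a distinct explicit cap keeps it unchanged:
console.log(maxOutputTokensSketch({ maxTokens: 8192, contextWindow: 131072 }))
// => 8192
```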