@@ -120,11 +120,10 @@ async def build_workflow(

     # STEP 3: Use messaging layer to get LLM evaluation with dynamic token limit
     model_name = await detect_model_name(ctx)
-    dynamic_max_tokens = get_llm_output_limit(model_name)
     response_text = await llm_provider.send_message(
         messages=messages,
         ctx=ctx,
-        max_tokens=dynamic_max_tokens,
+        max_tokens=get_llm_output_limit(model_name),
         prefer_sampling=True,
     )

@@ -497,11 +496,10 @@ async def _evaluate_coding_plan(

     # Use dynamic token limit for response
     model_name = await detect_model_name(ctx)
-    dynamic_max_tokens = get_llm_output_limit(model_name)
     response_text = await llm_provider.send_message(
         messages=messages,
         ctx=ctx,
-        max_tokens=dynamic_max_tokens,
+        max_tokens=get_llm_output_limit(model_name),
         prefer_sampling=True,
     )

@@ -695,11 +693,10 @@ async def judge_code_change(

     # STEP 3: Use messaging layer for LLM evaluation with dynamic token limit
     model_name = await detect_model_name(ctx)
-    dynamic_max_tokens = get_llm_output_limit(model_name)
     response_text = await llm_provider.send_message(
         messages=messages,
         ctx=ctx,
-        max_tokens=dynamic_max_tokens,
+        max_tokens=get_llm_output_limit(model_name),
         prefer_sampling=True,
     )

0 commit comments