@@ -962,19 +962,20 @@ describe('AgenticChatController', () => {
             assert.strictEqual(typedChatResult.data?.body, errorMsg)
         })

-        it('does not make backend request when input is too long', async function () {
-            const input = 'X'.repeat(generateAssistantResponseInputLimit + 1)
-            const chatResult = await chatController.onChatPrompt(
-                { tabId: mockTabId, prompt: { prompt: input } },
-                mockCancellationToken
+        it('truncates input to 500k characters', async function () {
+            const input = 'X'.repeat(generateAssistantResponseInputLimit + 10)
+            generateAssistantResponseStub.restore()
+            generateAssistantResponseStub = sinon.stub(CodeWhispererStreaming.prototype, 'generateAssistantResponse')
+            generateAssistantResponseStub.callsFake(() => {})
+            await chatController.onChatPrompt({ tabId: mockTabId, prompt: { prompt: input } }, mockCancellationToken)
+            assert.ok(generateAssistantResponseStub.called)
+            const calledRequestInput: GenerateAssistantResponseCommandInput =
+                generateAssistantResponseStub.firstCall.firstArg
+            assert.deepStrictEqual(
+                calledRequestInput.conversationState?.currentMessage?.userInputMessage?.content?.length,
+                generateAssistantResponseInputLimit
             )
-
-            const typedChatResult = chatResult as ResponseError<ChatResult>
-            assert.ok(typedChatResult.message.includes('too long'))
-            assert.ok(typedChatResult.data?.body?.includes('too long'))
-            assert.ok(generateAssistantResponseStub.notCalled)
         })
-
         it('shows generic errorMsg on internal errors', async function () {
             const chatResult = await chatController.onChatPrompt(
                 { tabId: mockTabId, prompt: { prompt: 'Hello' } },
@@ -1072,26 +1073,6 @@ describe('AgenticChatController', () => {
             assert.strictEqual(typedChatResult.message, 'invalid state')
         })

-        it('returns a user-friendly message when input is too long', async () => {
-            generateAssistantResponseStub.restore()
-            generateAssistantResponseStub = sinon.stub(CodeWhispererStreaming.prototype, 'generateAssistantResponse')
-            generateAssistantResponseStub.callsFake(() => {
-                const error = new Error('Input is too long')
-                throw error
-            })
-
-            const chatResult = await chatController.onChatPrompt(
-                { tabId: mockTabId, prompt: { prompt: 'Hello with large context' } },
-                mockCancellationToken
-            )
-
-            const typedChatResult = chatResult as ResponseError<ChatResult>
-            assert.strictEqual(
-                typedChatResult.data?.body,
-                'Too much context loaded. I have cleared the conversation history. Please retry your request with smaller input.'
-            )
-        })
-
     describe('#extractDocumentContext', () => {
         const typescriptDocument = TextDocument.create('file:///test.ts', 'typescript', 1, 'test')
         let extractDocumentContextStub: sinon.SinonStub
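
Note: the replaced tests reflect a behavior change — onChatPrompt now truncates over-length prompts to generateAssistantResponseInputLimit before calling generateAssistantResponse, instead of rejecting the request or surfacing an "input is too long" error. A minimal sketch of the truncation the new test asserts, assuming the 500k limit implied by the test name (hypothetical helper, not the controller's actual implementation):

    // Hypothetical sketch: clamp the user prompt to the request input limit
    // before it is placed into the backend request's userInputMessage.content.
    const generateAssistantResponseInputLimit = 500_000 // assumed value from the test name

    function truncatePrompt(prompt: string): string {
        return prompt.length > generateAssistantResponseInputLimit
            ? prompt.slice(0, generateAssistantResponseInputLimit)
            : prompt
    }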