Skip to content

Commit d6f84db

Browse files
authored
fix: truncate userInputMessage to first 500k characters (#1327)
* fix: truncate userInputMessage to 500k
1 parent 3338cc1 commit d6f84db

File tree

2 files changed

+22
-37
lines changed

2 files changed

+22
-37
lines changed

server/aws-lsp-codewhisperer/src/language-server/agenticChat/agenticChatController.test.ts

Lines changed: 12 additions & 31 deletions
Original file line number · Diff line number · Diff line change
@@ -962,19 +962,20 @@ describe('AgenticChatController', () => {
962962
assert.strictEqual(typedChatResult.data?.body, errorMsg)
963963
})
964964

965-
it('does not make backend request when input is too long ', async function () {
966-
const input = 'X'.repeat(generateAssistantResponseInputLimit + 1)
967-
const chatResult = await chatController.onChatPrompt(
968-
{ tabId: mockTabId, prompt: { prompt: input } },
969-
mockCancellationToken
965+
it('truncate input to 500k character ', async function () {
966+
const input = 'X'.repeat(generateAssistantResponseInputLimit + 10)
967+
generateAssistantResponseStub.restore()
968+
generateAssistantResponseStub = sinon.stub(CodeWhispererStreaming.prototype, 'generateAssistantResponse')
969+
generateAssistantResponseStub.callsFake(() => {})
970+
await chatController.onChatPrompt({ tabId: mockTabId, prompt: { prompt: input } }, mockCancellationToken)
971+
assert.ok(generateAssistantResponseStub.called)
972+
const calledRequestInput: GenerateAssistantResponseCommandInput =
973+
generateAssistantResponseStub.firstCall.firstArg
974+
assert.deepStrictEqual(
975+
calledRequestInput.conversationState?.currentMessage?.userInputMessage?.content?.length,
976+
generateAssistantResponseInputLimit
970977
)
971-
972-
const typedChatResult = chatResult as ResponseError<ChatResult>
973-
assert.ok(typedChatResult.message.includes('too long'))
974-
assert.ok(typedChatResult.data?.body?.includes('too long'))
975-
assert.ok(generateAssistantResponseStub.notCalled)
976978
})
977-
978979
it('shows generic errorMsg on internal errors', async function () {
979980
const chatResult = await chatController.onChatPrompt(
980981
{ tabId: mockTabId, prompt: { prompt: 'Hello' } },
@@ -1072,26 +1073,6 @@ describe('AgenticChatController', () => {
10721073
assert.strictEqual(typedChatResult.message, 'invalid state')
10731074
})
10741075

1075-
it('returns a user-friendly message when input is too long', async () => {
1076-
generateAssistantResponseStub.restore()
1077-
generateAssistantResponseStub = sinon.stub(CodeWhispererStreaming.prototype, 'generateAssistantResponse')
1078-
generateAssistantResponseStub.callsFake(() => {
1079-
const error = new Error('Input is too long')
1080-
throw error
1081-
})
1082-
1083-
const chatResult = await chatController.onChatPrompt(
1084-
{ tabId: mockTabId, prompt: { prompt: 'Hello with large context' } },
1085-
mockCancellationToken
1086-
)
1087-
1088-
const typedChatResult = chatResult as ResponseError<ChatResult>
1089-
assert.strictEqual(
1090-
typedChatResult.data?.body,
1091-
'Too much context loaded. I have cleared the conversation history. Please retry your request with smaller input.'
1092-
)
1093-
})
1094-
10951076
describe('#extractDocumentContext', () => {
10961077
const typescriptDocument = TextDocument.create('file:///test.ts', 'typescript', 1, 'test')
10971078
let extractDocumentContextStub: sinon.SinonStub

server/aws-lsp-codewhisperer/src/language-server/agenticChat/agenticChatController.ts

Lines changed: 10 additions & 6 deletions
Original file line number · Diff line number · Diff line change
@@ -576,7 +576,7 @@ export class AgenticChatController implements ChatHandlers {
576576
await chatResultStream.writeResultBlock({ ...loadingMessage, messageId: loadingMessageId })
577577

578578
// Phase 3: Request Execution
579-
this.#validateRequest(currentRequestInput)
579+
this.#truncateRequest(currentRequestInput)
580580
const response = await session.generateAssistantResponse(currentRequestInput)
581581
this.#features.logging.info(
582582
`generateAssistantResponse ResponseMetadata: ${loggingUtils.formatObj(response.$metadata)}`
@@ -695,17 +695,21 @@ export class AgenticChatController implements ChatHandlers {
695695
}
696696

697697
/**
698-
* performs pre-validation of request before sending to backend service.
698+
* performs truncation of request before sending to backend service.
699699
* @param request
700700
*/
701-
#validateRequest(request: GenerateAssistantResponseCommandInput) {
701+
#truncateRequest(request: GenerateAssistantResponseCommandInput) {
702702
// Note: these logs are very noisy, but contain information redacted on the backend.
703703
this.#debug(`generateAssistantResponse Request: ${JSON.stringify(request, undefined, 2)}`)
704+
if (!request?.conversationState?.currentMessage?.userInputMessage) {
705+
return
706+
}
704707
const message = request.conversationState?.currentMessage?.userInputMessage?.content
705708
if (message && message.length > generateAssistantResponseInputLimit) {
706-
throw new AgenticChatError(
707-
`Message is too long with ${message.length} characters, max is ${generateAssistantResponseInputLimit}`,
708-
'PromptCharacterLimit'
709+
this.#debug(`Truncating userInputMessage to ${generateAssistantResponseInputLimit} characters}`)
710+
request.conversationState.currentMessage.userInputMessage.content = message.substring(
711+
0,
712+
generateAssistantResponseInputLimit
709713
)
710714
}
711715
}

0 commit comments

Comments (0)