Skip to content
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ import {
InsertToCursorPositionParams,
TextDocumentEdit,
InlineChatResult,
CancellationToken,
CancellationTokenSource,
} from '@aws/language-server-runtimes/server-interface'
import { TestFeatures } from '@aws/language-server-runtimes/testing'
import * as assert from 'assert'
Expand All @@ -43,6 +45,8 @@ import { AdditionalContextProvider } from './context/addtionalContextProvider'
import { ContextCommandsProvider } from './context/contextCommandsProvider'
import { ChatDatabase } from './tools/chatDb/chatDb'
import { LocalProjectContextController } from '../../shared/localProjectContextController'
import { CancellationError } from '@aws/lsp-core'
import { ToolApprovalException } from './tools/toolShared'

describe('AgenticChatController', () => {
const mockTabId = 'tab-1'
Expand Down Expand Up @@ -916,7 +920,7 @@ describe('AgenticChatController', () => {
})
})

it('returns a ResponseError if sendMessage returns an error', async () => {
it('propagates error message to final chat result', async () => {
generateAssistantResponseStub.callsFake(() => {
throw new Error('Error')
})
Expand All @@ -926,10 +930,13 @@ describe('AgenticChatController', () => {
mockCancellationToken
)

assert.ok(chatResult instanceof ResponseError)
// These checks will fail if a response error is returned.
const typedChatResult = chatResult as ChatResult
assert.strictEqual(typedChatResult.type, 'answer')
assert.strictEqual(typedChatResult.body, 'Error')
})

it('returns a auth follow up action if sendMessage returns an auth error', async () => {
it('returns an auth follow up action if model request returns an auth error', async () => {
generateAssistantResponseStub.callsFake(() => {
throw new Error('Error')
})
Expand All @@ -942,7 +949,8 @@ describe('AgenticChatController', () => {

const chatResult = await chatResultPromise

sinon.assert.callCount(testFeatures.lsp.sendProgress, 1) // called for loading message
// called once for error message propagation and once for loading message.
sinon.assert.callCount(testFeatures.lsp.sendProgress, 2)
assert.deepStrictEqual(chatResult, utils.createAuthFollowUpResult('full-auth'))
})

Expand Down Expand Up @@ -1844,6 +1852,23 @@ ${' '.repeat(8)}}

sinon.assert.calledOnce(tabBarActionStub)
})

it('determines when an error is a user action', function () {
    // Despite its message text, a plain Error is NOT a user action.
    const nonUserAction = new Error('User action error')
    // Explicit user cancellation and tool rejection ARE user actions.
    const cancellationError = new CancellationError('user')
    const rejectionError = new ToolApprovalException()
    const tokenSource = new CancellationTokenSource()

    assert.ok(!chatController.isUserAction(nonUserAction))
    assert.ok(chatController.isUserAction(cancellationError))
    assert.ok(chatController.isUserAction(rejectionError))

    // A live (non-cancelled) token does not change the classification.
    assert.ok(!chatController.isUserAction(nonUserAction, tokenSource.token))

    tokenSource.cancel()

    // Once the token is cancelled, any error counts as a user action.
    assert.ok(chatController.isUserAction(nonUserAction, tokenSource.token))
})
})

// The body may include text-based progress updates from tool invocations.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
* Will be deleted or merged.
*/

import * as path from 'path'

Check warning on line 6 in server/aws-lsp-codewhisperer/src/language-server/agenticChat/agenticChatController.ts

View workflow job for this annotation

GitHub Actions / Test

Do not import Node.js builtin module "path"

Check warning on line 6 in server/aws-lsp-codewhisperer/src/language-server/agenticChat/agenticChatController.ts

View workflow job for this annotation

GitHub Actions / Test (Windows)

Do not import Node.js builtin module "path"
import {
ChatTriggerType,
GenerateAssistantResponseCommandInput,
Expand Down Expand Up @@ -101,6 +101,7 @@
import { FsWrite, FsWriteParams, getDiffChanges } from './tools/fsWrite'
import { ExecuteBash, ExecuteBashOutput, ExecuteBashParams } from './tools/executeBash'
import { ExplanatoryParams, InvokeOutput, ToolApprovalException } from './tools/toolShared'
import { ModelServiceException } from './errors'
import { FileSearch, FileSearchParams } from './tools/fileSearch'
import { GrepSearch, SanitizedRipgrepOutput } from './tools/grepSearch'

Expand Down Expand Up @@ -337,8 +338,20 @@
chatResultStream
)
} catch (err) {
// TODO: On ToolValidationException, we want to show custom mynah-ui components making it clear it was cancelled.
if (CancellationError.isUserCancelled(err) || err instanceof ToolApprovalException) {
// HACK: the chat-client needs to have a partial event with the associated messageId sent before it can accept the final result.
// Without this, the `thinking` indicator never goes away.
// Note: buttons being explicitly empty is required for this hack to work.
const errorMessageId = `error-message-id-${uuid()}`
await this.#sendProgressToClient(
{
type: 'answer',
body: '',
messageId: errorMessageId,
buttons: [],
},
params.partialResultToken
)
if (this.isUserAction(err, token)) {
/**
* when the session is aborted it generates an error.
* we need to resolve this error with an answer so the
Expand All @@ -347,9 +360,11 @@
return {
type: 'answer',
body: '',
messageId: errorMessageId,
buttons: [],
}
}
return this.#handleRequestError(err, params.tabId, metric)
return this.#handleRequestError(err, errorMessageId, params.tabId, metric)
}
}

Expand Down Expand Up @@ -445,10 +460,9 @@
}

// Phase 3: Request Execution
this.#debug(`Request Input: ${JSON.stringify(currentRequestInput)}`)

const response = await session.generateAssistantResponse(currentRequestInput)
this.#debug(`Response received for iteration ${iterationCount}:`, JSON.stringify(response.$metadata))
const response = await this.fetchModelResponse(currentRequestInput, i =>
session.generateAssistantResponse(i)
)

// remove the temp loading message when we have response
if (loadingMessageId) {
Expand Down Expand Up @@ -699,8 +713,8 @@
this.#features.chat.sendChatUpdate({ tabId, state: { inProgress: false } })
loadingMessageId = undefined
}
// If we did not approve a tool to be used or the user stopped the response, bubble this up to interrupt agentic loop
if (CancellationError.isUserCancelled(err) || err instanceof ToolApprovalException) {

if (this.isUserAction(err, token)) {
if (err instanceof ToolApprovalException && toolUse.name === 'executeBash') {
if (buttonBlockId) {
await chatResultStream.overwriteResultBlock(
Expand All @@ -714,9 +728,6 @@
throw err
}
const errMsg = err instanceof Error ? err.message : 'unknown error'
await chatResultStream.writeResultBlock({
body: toolErrorMessage(toolUse, errMsg),
})
this.#log(`Error running tool ${toolUse.name}:`, errMsg)
results.push({
toolUseId: toolUse.toolUseId,
Expand All @@ -729,6 +740,34 @@
return results
}

/**
 * Determines if an error was thrown as a result of a deliberate user action
 * (e.g. rejecting a tool approval, or pressing the stop button).
 * @param err the caught value to classify (thrown values are not guaranteed to be Errors, hence unknown)
 * @param token optional cancellation token; when already cancelled, any error is treated as a user action
 * @returns true when the error stems from a user action, false otherwise
 */
isUserAction(err: unknown, token?: CancellationToken): boolean {
    return (
        CancellationError.isUserCancelled(err) ||
        err instanceof ToolApprovalException ||
        (token?.isCancellationRequested ?? false)
    )
}

/**
 * Executes a model/backend request with debug logging of the request and
 * response payloads, wrapping any failure in ModelServiceException so callers
 * can distinguish backend errors from other failures via instanceof.
 * @param requestInput input forwarded verbatim to makeRequest
 * @param makeRequest function that performs the actual backend call
 * @returns the backend response
 * @throws ModelServiceException wrapping the original error when the request fails
 */
async fetchModelResponse<RequestType, ResponseType>(
    requestInput: RequestType,
    makeRequest: (requestInput: RequestType) => Promise<ResponseType>
): Promise<ResponseType> {
    this.#debug(`Q Backend Request: ${JSON.stringify(requestInput)}`)
    try {
        const response = await makeRequest(requestInput)
        this.#debug(`Q Backend Response: ${JSON.stringify(response)}`)
        return response
    } catch (e) {
        // JSON.stringify on an Error yields '{}' (its properties are
        // non-enumerable), so log the stack/message explicitly instead.
        const detail = e instanceof Error ? (e.stack ?? e.message) : JSON.stringify(e)
        this.#features.logging.error(`Error in call: ${detail}`)
        // Coerce non-Error throws rather than asserting them into Error.
        throw new ModelServiceException(e instanceof Error ? e : new Error(String(e)))
    }
}

#validateToolResult(toolUse: ToolUse, result: ToolResultContentBlock) {
let maxToolResponseSize
switch (toolUse.name) {
Expand Down Expand Up @@ -1111,6 +1150,7 @@
*/
#handleRequestError(
err: any,
errorMessageId: string,
tabId: string,
metric: Metric<CombinedConversationEvent>
): ChatResult | ResponseError<ChatResult> {
Expand All @@ -1119,12 +1159,23 @@
this.#telemetryController.emitMessageResponseError(tabId, metric.metric, err.requestId, err.message)
}

if (err instanceof AmazonQServicePendingSigninError) {
// return non-model errors back to the client as errors
if (!(err instanceof ModelServiceException)) {
this.#log(`unknown error ${err instanceof Error ? JSON.stringify(err) : 'unknown'}`)
this.#debug(`stack ${err instanceof Error ? JSON.stringify(err.stack) : 'unknown'}`)
this.#debug(`cause ${err instanceof Error ? JSON.stringify(err.cause) : 'unknown'}`)
return new ResponseError<ChatResult>(
LSPErrorCodes.RequestFailed,
err instanceof Error ? err.message : 'Unknown request error'
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are there any plans to add something like innerCode which holds the code (sdk v2) or name (sdk v3) from the original error?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

it sounds like there isn't yet a reliable way to distinguish errors produced by the backend (source). So rather than rely on their errors, I refactored this to wrap their error and include a code field, but only on the language-server side; it is not sent back to the client.

My intention with this error handling is to do as much on the language server side as possible, such that the client will only have to render the ChatResult from the data field of the ResponseError. My thinking is that this will reduce the amount of custom error handling each IDE will have to implement.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That makes sense. But for unforeseen cases, we might want to give clients an escape hatch? Otherwise they will need to do their own checks, or check the message text, for errors that require client-side handling (such as #1197).

)
}

if (err.cause instanceof AmazonQServicePendingSigninError) {
this.#log(`Q Chat SSO Connection error: ${getErrorMessage(err)}`)
return createAuthFollowUpResult('full-auth')
}

if (err instanceof AmazonQServicePendingProfileError) {
if (err.cause instanceof AmazonQServicePendingProfileError) {
this.#log(`Q Chat SSO Connection error: ${getErrorMessage(err)}`)
const followUpResult = createAuthFollowUpResult('use-supported-auth')
// Access first element in array
Expand All @@ -1140,13 +1191,14 @@
return createAuthFollowUpResult(authFollowType)
}

this.#log(`Q api request error ${err instanceof Error ? JSON.stringify(err) : 'unknown'}`)
this.#debug(`Q api request error stack ${err instanceof Error ? JSON.stringify(err.stack) : 'unknown'}`)
this.#debug(`Q api request error cause ${err instanceof Error ? JSON.stringify(err.cause) : 'unknown'}`)
return new ResponseError<ChatResult>(
LSPErrorCodes.RequestFailed,
err instanceof Error ? err.message : 'Unknown request error'
)
const backendError = err.cause
// Send the backend error message directly to the client to be displayed in chat.
return {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Another, more LSP, way to handle this would be to return ResponseError in result of LSP request. Then extension would need to translate that to error on the UI by calling "errorMessage" command with params through postMessage.

Copy link
Contributor Author

@Hweinstock Hweinstock Apr 24, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, that makes sense. My thinking was that if we wanted any custom formatting on this message in the future it could be done once on the LSP side rather than reimplemented by each client. Is there a way to do that using ResponseError?

Looks like this can be done by passing in a ChatResult as data in the error potentially?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ResponseError has generic data param, so that might be used. Custom formatting could also be on chat-client side (here), depending on what exactly you mean.

type: 'answer',
body: backendError.message,
messageId: errorMessageId,
buttons: [],
}
}

async onInlineChatPrompt(
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
/**
 * Wraps errors originating from the model/backend request path so callers can
 * distinguish them (via instanceof ModelServiceException) from other failures.
 *
 * Extends Error so thrown instances carry a message and stack trace and
 * satisfy `err instanceof Error` checks used by generic error handlers;
 * the original backend error stays reachable through `cause`.
 */
export class ModelServiceException extends Error {
    public constructor(public readonly cause: Error) {
        super(cause.message)
        this.name = 'ModelServiceException'
    }
}
Loading