Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
* Will be deleted or merged.
*/

import * as path from 'path'

Check warning on line 6 in server/aws-lsp-codewhisperer/src/language-server/agenticChat/agenticChatController.ts

View workflow job for this annotation

GitHub Actions / Test

Do not import Node.js builtin module "path"

Check warning on line 6 in server/aws-lsp-codewhisperer/src/language-server/agenticChat/agenticChatController.ts

View workflow job for this annotation

GitHub Actions / Test (Windows)

Do not import Node.js builtin module "path"
import {
ChatTriggerType,
CodeWhispererStreamingServiceException,
Expand Down Expand Up @@ -62,6 +62,7 @@
import { v4 as uuid } from 'uuid'
import {
AddMessageEvent,
ChatConversationType,
ChatInteractionType,
ChatTelemetryEventName,
CombinedConversationEvent,
Expand Down Expand Up @@ -150,6 +151,8 @@
#contextCommandsProvider: ContextCommandsProvider
#stoppedToolUses = new Set<string>()
#userWrittenCodeTracker: UserWrittenCodeTracker | undefined
#toolUseStartTimes: Record<string, number> = {}
#toolUseLatencies: Array<{ toolName: string; toolUseId: string; latency: number }> = []

/**
* Determines the appropriate message ID for a tool use based on tool type and name
Expand Down Expand Up @@ -224,7 +227,12 @@
try {
await this.#undoFileChange(toolUseId, session.data)
this.#updateUndoButtonAfterClick(params.tabId, toolUseId, session.data)
this.#telemetryController.emitInteractWithAgenticChat('RejectDiff', params.tabId)
this.#telemetryController.emitInteractWithAgenticChat(
'RejectDiff',
params.tabId,
session.data?.pairProgrammingMode,
session.data?.getConversationType()
)
} catch (err: any) {
return { success: false, failureReason: err.message }
}
Expand Down Expand Up @@ -410,11 +418,17 @@
messageId: 'stopped' + uuid(),
body: 'You stopped your current work, please provide additional examples or ask another question.',
})
this.#telemetryController.emitInteractWithAgenticChat('StopChat', params.tabId)
this.#telemetryController.emitInteractWithAgenticChat(
'StopChat',
params.tabId,
session.pairProgrammingMode,
session.getConversationType()
)
session.abortRequest()
void this.#invalidateAllShellCommands(params.tabId, session)
session.rejectAllDeferredToolExecutions(new CancellationError('user'))
})
session.setConversationType('AgenticChat')

const chatResultStream = this.#getChatResultStream(params.partialResultToken)

Expand Down Expand Up @@ -485,7 +499,7 @@
buttons: [],
}
}
return this.#handleRequestError(err, errorMessageId, params.tabId, metric)
return this.#handleRequestError(err, errorMessageId, params.tabId, metric, session.pairProgrammingMode)
}
}

Expand Down Expand Up @@ -657,34 +671,76 @@

if (pendingToolUses.length === 0) {
// No more tool uses, we're done
this.#telemetryController.emitAgencticLoop_InvokeLLM(
response.$metadata.requestId!,
conversationId,
'AgenticChat',
undefined,
undefined,
'Succeeded',
this.#features.runtime.serverInfo.version ?? '',
undefined,
session.pairProgrammingMode
)
finalResult = result
break
}

let content = ''
let toolResults: ToolResult[]
session.setConversationType('AgenticChatWithToolUse')
if (result.success) {
// Process tool uses and update the request input for the next iteration
toolResults = await this.#processToolUses(pendingToolUses, chatResultStream, session, tabId, token)
if (toolResults.some(toolResult => this.#shouldSendBackErrorContent(toolResult))) {
content = 'There was an error processing one or more tool uses. Try again, do not apologize.'
shouldDisplayMessage = false
}
metric.setDimension('cwsprChatConversationType', 'AgenticChatWithToolUse')
const conversationType = session.getConversationType() as ChatConversationType
metric.setDimension('cwsprChatConversationType', conversationType)
metric.setDimension('requestIds', metric.metric.requestIds)
const toolNames = this.#toolUseLatencies.map(item => item.toolName)
const toolUseIds = this.#toolUseLatencies.map(item => item.toolUseId)
const latency = this.#toolUseLatencies.map(item => item.latency)
this.#telemetryController.emitAgencticLoop_InvokeLLM(
response.$metadata.requestId!,
conversationId,
'AgenticChatWithToolUse',
toolNames ?? undefined,
toolUseIds ?? undefined,
'Succeeded',
this.#features.runtime.serverInfo.version ?? '',
latency,
session.pairProgrammingMode
)
} else {
// Send an error card to UI?
toolResults = pendingToolUses.map(toolUse => ({
toolUseId: toolUse.toolUseId,
status: ToolResultStatus.ERROR,
content: [{ text: result.error }],
}))
this.#telemetryController.emitAgencticLoop_InvokeLLM(
response.$metadata.requestId!,
conversationId,
'AgenticChatWithToolUse',
undefined,
undefined,
'Failed',
this.#features.runtime.serverInfo.version ?? '',
undefined,
session.pairProgrammingMode
)
if (result.error.startsWith('ToolUse input is invalid JSON:')) {
content =
'Your toolUse input is incomplete, try again. If the error happens consistently, break this task down into multiple tool uses with smaller input. Do not apologize.'
shouldDisplayMessage = false
}
}
if (result.success && this.#toolUseLatencies.length > 0) {
// Clear latencies for the next LLM call
this.#toolUseLatencies = []
}
currentRequestInput = this.#updateRequestInputWithToolResults(currentRequestInput, toolResults, content)
}

Expand Down Expand Up @@ -767,6 +823,11 @@
if (!toolUse.name || !toolUse.toolUseId) continue
session.toolUseLookup.set(toolUse.toolUseId, toolUse)

// Record the start time for this tool use for latency calculation
if (toolUse.toolUseId) {
this.#toolUseStartTimes[toolUse.toolUseId] = Date.now()
}

try {
// TODO: Can we move this check in the event parser before the stream completes?
const availableToolNames = this.#getTools(session).map(tool => tool.toolSpecification.name)
Expand Down Expand Up @@ -822,13 +883,23 @@
cachedButtonBlockId = await chatResultStream.writeResultBlock(confirmationResult)
const isExecuteBash = toolUse.name === 'executeBash'
if (isExecuteBash) {
this.#telemetryController.emitInteractWithAgenticChat('GeneratedCommand', tabId)
this.#telemetryController.emitInteractWithAgenticChat(
'GeneratedCommand',
tabId,
session.pairProgrammingMode,
session.getConversationType()
)
}
if (requiresAcceptance) {
await this.waitForToolApproval(toolUse, chatResultStream, cachedButtonBlockId, session)
}
if (isExecuteBash) {
this.#telemetryController.emitInteractWithAgenticChat('RunCommand', tabId)
this.#telemetryController.emitInteractWithAgenticChat(
'RunCommand',
tabId,
session.pairProgrammingMode,
session.getConversationType()
)
}
}
break
Expand Down Expand Up @@ -910,7 +981,12 @@
fileChange: { ...cachedToolUse.fileChange, after: doc?.getText() },
})
}
this.#telemetryController.emitInteractWithAgenticChat('GeneratedDiff', tabId)
this.#telemetryController.emitInteractWithAgenticChat(
'GeneratedDiff',
tabId,
session.pairProgrammingMode,
session.getConversationType()
)
await chatResultStream.writeResultBlock(chatResult)
break
default:
Expand All @@ -924,11 +1000,28 @@
}
this.#updateUndoAllState(toolUse, session)

if (toolUse.name) {
if (toolUse.name && toolUse.toolUseId) {
// Calculate latency if we have a start time for this tool use
let latency: number | undefined = undefined
if (this.#toolUseStartTimes[toolUse.toolUseId]) {
latency = Date.now() - this.#toolUseStartTimes[toolUse.toolUseId]
delete this.#toolUseStartTimes[toolUse.toolUseId]

if (latency !== undefined) {
this.#toolUseLatencies.push({
toolName: toolUse.name,
toolUseId: toolUse.toolUseId,
latency: latency,
})
}
}

this.#telemetryController.emitToolUseSuggested(
toolUse,
session.conversationId ?? '',
this.#features.runtime.serverInfo.version ?? ''
this.#features.runtime.serverInfo.version ?? '',
latency,
session.pairProgrammingMode
)
}
} catch (err) {
Expand Down Expand Up @@ -1576,6 +1669,7 @@

metric.setDimension('codewhispererCustomizationArn', this.#customizationArn)
metric.setDimension('languageServerVersion', this.#features.runtime.serverInfo.version)
metric.setDimension('enabled', session.pairProgrammingMode)
const profileArn = AmazonQTokenServiceManager.getInstance().getActiveProfileArn()
if (profileArn) {
this.#telemetryService.updateProfileArn(profileArn)
Expand Down Expand Up @@ -1619,7 +1713,8 @@
err: any,
errorMessageId: string,
tabId: string,
metric: Metric<CombinedConversationEvent>
metric: Metric<CombinedConversationEvent>,
agenticCodingMode: boolean
): ChatResult | ResponseError<ChatResult> {
const errorMessage = getErrorMessage(err)
const requestID = getRequestID(err) ?? ''
Expand All @@ -1629,13 +1724,20 @@
// use custom error message for unactionable errors (user-dependent errors like PromptCharacterLimit)
if (err.code && err.code in unactionableErrorCodes) {
const customErrMessage = unactionableErrorCodes[err.code as keyof typeof unactionableErrorCodes]
this.#telemetryController.emitMessageResponseError(tabId, metric.metric, requestID, customErrMessage)
this.#telemetryController.emitMessageResponseError(
tabId,
metric.metric,
requestID,
customErrMessage,
agenticCodingMode
)
} else {
this.#telemetryController.emitMessageResponseError(
tabId,
metric.metric,
requestID,
errorMessage ?? genericErrorMsg
errorMessage ?? genericErrorMsg,
agenticCodingMode
)
}

Expand Down Expand Up @@ -2178,6 +2280,10 @@
if (!toolUseEvent.stop && toolUseId) {
if (!toolUseStartTimes[toolUseId]) {
toolUseStartTimes[toolUseId] = Date.now()
// Also record in the class-level toolUseStartTimes for latency calculation
if (!this.#toolUseStartTimes[toolUseId]) {
this.#toolUseStartTimes[toolUseId] = Date.now()
}
this.#debug(`ToolUseEvent ${toolUseId} started`)
toolUseLoadingTimeouts[toolUseId] = setTimeout(async () => {
this.#debug(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ export class ChatSessionService {
public contextListSent: boolean = false
#abortController?: AbortController
#conversationId?: string
#conversationType: string = 'AgenticChat'
#deferredToolExecution: Record<string, DeferredHandler> = {}
#toolUseLookup: Map<
string,
Expand All @@ -37,6 +38,14 @@ export class ChatSessionService {
#approvedPaths: Set<string> = new Set<string>()
#serviceManager?: AmazonQBaseServiceManager

/**
 * Returns the conversation type currently recorded for this session.
 * Starts as 'AgenticChat' (field default) and is switched to
 * 'AgenticChatWithToolUse' by the chat controller once the agentic
 * loop processes a tool use; consumed when emitting telemetry.
 */
public getConversationType(): string {
    return this.#conversationType
}

/**
 * Records the conversation type for this session (e.g. 'AgenticChat'
 * or 'AgenticChatWithToolUse') so later telemetry events can report it.
 */
public setConversationType(value: string) {
    this.#conversationType = value
}

public get conversationId(): string | undefined {
return this.#conversationId
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,41 @@ export class ChatTelemetryController {
}
}

public emitToolUseSuggested(toolUse: ToolUse, conversationId: string, languageServerVersion: string) {
/**
 * Emits the AgencticLoop_InvokeLLM telemetry event for one LLM
 * invocation inside the agentic chat loop.
 *
 * @param requestId - request id from the streaming response metadata
 * @param conversationId - id of the conversation this invocation belongs to
 * @param conversationType - 'AgenticChat' or 'AgenticChatWithToolUse'
 * @param toolName - names of tools executed before this invocation, if any
 * @param toolUseId - tool-use ids matching `toolName`, if any
 * @param result - outcome of the invocation ('Succeeded' / 'Failed')
 * @param languageServerVersion - version of the running language server
 * @param latency - per-tool-use latencies in milliseconds, if measured
 * @param agenticCodingMode - whether pair-programming mode was enabled
 */
public emitAgencticLoop_InvokeLLM(
    requestId: string,
    conversationId: string,
    conversationType: string,
    toolName: string[] | undefined,
    toolUseId: string[] | undefined,
    result: string,
    languageServerVersion: string,
    latency?: number[],
    agenticCodingMode?: boolean
) {
    // Start URL identifies the active SSO connection, when one exists.
    const startUrl = this.#credentialsProvider.getConnectionMetadata()?.sso?.startUrl
    const data = {
        [CONVERSATION_ID_METRIC_KEY]: conversationId,
        cwsprChatConversationType: conversationType,
        credentialStartUrl: startUrl,
        cwsprToolName: toolName,
        cwsprToolUseId: toolUseId,
        result: result,
        languageServerVersion: languageServerVersion,
        latency: latency,
        requestId: requestId,
        enabled: agenticCodingMode,
    }
    this.#telemetry.emitMetric({
        name: ChatTelemetryEventName.AgencticLoop_InvokeLLM,
        data,
    })
}

public emitToolUseSuggested(
toolUse: ToolUse,
conversationId: string,
languageServerVersion: string,
latency?: number,
agenticCodingMode?: boolean
) {
this.#telemetry.emitMetric({
name: ChatTelemetryEventName.ToolUseSuggested,
data: {
Expand All @@ -178,21 +212,29 @@ export class ChatTelemetryController {
credentialStartUrl: this.#credentialsProvider.getConnectionMetadata()?.sso?.startUrl,
cwsprToolName: toolUse.name ?? '',
cwsprToolUseId: toolUse.toolUseId ?? '',
perfE2ELatency: latency,
result: 'Succeeded',
languageServerVersion: languageServerVersion,
enabled: agenticCodingMode,
},
})
}

/**
 * Emits the InteractWithAgenticChat telemetry event for a user
 * interaction with agentic chat (e.g. 'RejectDiff', 'StopChat',
 * 'GeneratedCommand', 'RunCommand', 'GeneratedDiff').
 *
 * @param interactionType - the kind of interaction being reported
 * @param tabId - chat tab the interaction happened in; used to resolve
 *                the conversation id
 * @param agenticCodingMode - whether pair-programming mode was enabled
 * @param conversationType - conversation type reported by the session;
 *                           defaults to 'AgenticChat' when omitted
 */
public emitInteractWithAgenticChat(
    interactionType: AgenticChatInteractionType,
    tabId: string,
    agenticCodingMode?: boolean,
    conversationType?: string
) {
    this.#telemetry.emitMetric({
        name: ChatTelemetryEventName.InteractWithAgenticChat,
        data: {
            [CONVERSATION_ID_METRIC_KEY]: this.getConversationId(tabId) ?? '',
            // Exactly one cwsprChatConversationType entry (the pasted diff
            // retained both the old hard-coded line and the new one, which
            // is a duplicate object key). Fall back to the historical
            // 'AgenticChat' value when the caller does not supply a type,
            // preserving pre-change behavior for older call sites.
            cwsprChatConversationType: conversationType ?? 'AgenticChat',
            credentialStartUrl: this.#credentialsProvider.getConnectionMetadata()?.sso?.startUrl,
            cwsprAgenticChatInteractionType: interactionType,
            result: 'Succeeded',
            enabled: agenticCodingMode,
        },
    })
}
Expand Down Expand Up @@ -221,6 +263,7 @@ export class ChatTelemetryController {
requestLength: metric.cwsprChatRequestLength,
responseLength: metric.cwsprChatResponseLength,
numberOfCodeBlocks: metric.cwsprChatResponseCodeSnippetCount,
agenticCodingMode: metric.enabled,
},
{
chatTriggerInteraction: metric.cwsprChatTriggerInteraction,
Expand Down Expand Up @@ -276,7 +319,8 @@ export class ChatTelemetryController {
tabId: string,
metric: Partial<CombinedConversationEvent>,
requestId?: string,
errorReason?: string
errorReason?: string,
agenticCodingMode?: boolean
) {
this.#telemetry.emitMetric({
name: ChatTelemetryEventName.MessageResponseError,
Expand All @@ -294,6 +338,7 @@ export class ChatTelemetryController {
reasonDesc: getTelemetryReasonDesc(errorReason),
credentialStartUrl: this.#credentialsProvider.getConnectionMetadata()?.sso?.startUrl,
result: 'Succeeded',
enabled: agenticCodingMode,
[CONVERSATION_ID_METRIC_KEY]: this.getConversationId(tabId),
languageServerVersion: metric.languageServerVersion,
},
Expand Down
Loading
Loading