
Commit 04c73fa

Connor Clark authored and Devtools-frontend LUCI CQ committed
[AI] Adopt single conversation type for performance conversations
This CL consolidates the AI conversation features (insights and main thread call trees) into a single conversation type.

User-facing changes:

* When a trace initially loads, the AI Assistance panel is now usable for asking anything about the trace.
* Selecting an insight or an entry in the main thread adds it to the currently selected context for the active conversation. This is conveyed to the user via the context chip title.
* The user simply needs to expand an insight or click on a main-thread entry to "select" it for the AI agent. The "Debug with AI" buttons still exist, and they additionally open the AI Assistance panel if it isn't open already.
* Moving from an "insight" to a "call-tree" context no longer starts a new conversation: it's all the same chat.

Technical refactors:

* AgentFocus now has just one type, with the "insight" and "callTree" fields being optional to denote what the user has selected.
* There is now only one performance ConversationType.

Bug: 442392194
Change-Id: I9b49a53eeae54ed86897f594e1c2874a50bd1edf
Reviewed-on: https://chromium-review.googlesource.com/c/devtools/devtools-frontend/+/6951783
Commit-Queue: Connor Clark <[email protected]>
Auto-Submit: Connor Clark <[email protected]>
Reviewed-by: Paul Irish <[email protected]>
1 parent 5e0e539 commit 04c73fa

15 files changed: +186, -284 lines changed
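
To make the consolidation concrete, here is a minimal TypeScript sketch of the focus shape the commit message describes. It is illustrative only: the simplified types and the helper function below are placeholders informed by the commit message and the diffs that follow, not the real devtools-frontend definitions (the actual AgentFocus lives in front_end/models/ai_assistance/performance/AIContext.ts, which this commit does not touch).

// Illustrative sketch only; simplified placeholder types, not the real ones.
interface InsightModel {
  insightKey: string;
  title: string;
}

interface AICallTree {
  serialize(): string;
}

// One focus type for the whole performance conversation. The optional fields
// record what, if anything, the user has currently selected.
interface AgentFocusData {
  parsedTrace: object;      // always present once a trace is loaded
  insight?: InsightModel;   // set when the user expands an insight
  callTree?: AICallTree;    // set when the user clicks a main-thread entry
}

// The same conversation can move between contexts without restarting:
function describeSelection(focus: AgentFocusData): string {
  if (focus.callTree) {
    return 'Selected call tree: ' + focus.callTree.serialize();
  }
  if (focus.insight) {
    return 'Selected insight: ' + focus.insight.insightKey;
  }
  return 'No selection; questions apply to the whole trace';
}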

front_end/models/ai_assistance/AiHistoryStorage.ts

Lines changed: 1 addition & 3 deletions
@@ -13,9 +13,7 @@ export const enum ConversationType {
   STYLING = 'freestyler',
   FILE = 'drjones-file',
   NETWORK = 'drjones-network-request',
-  PERFORMANCE_CALL_TREE = 'drjones-performance',
-  PERFORMANCE_INSIGHT = 'performance-insight',
-  PERFORMANCE_FULL = 'drjones-performance-full',
+  PERFORMANCE = 'drjones-performance-full',
 }

 export const NOT_FOUND_IMAGE_DATA = '';

front_end/models/ai_assistance/ConversationHandler.test.ts

Lines changed: 1 addition & 1 deletion
@@ -327,7 +327,7 @@ describeWithMockConnection('ConversationHandler', () => {

     const generator = await conversationHandler.handleExternalRequest({
       prompt: 'Please help me debug this problem',
-      conversationType: AiAssistanceModel.ConversationType.PERFORMANCE_FULL,
+      conversationType: AiAssistanceModel.ConversationType.PERFORMANCE,
       data: Timeline.TimelinePanel.TimelinePanel.instance().getOrCreateExternalAIConversationData(),
     });
     let response = await generator.next();

front_end/models/ai_assistance/ConversationHandler.ts

Lines changed: 4 additions & 6 deletions
@@ -50,7 +50,7 @@ export interface ExternalPerformanceAIConversationData {
 }

 export interface ExternalPerformanceRequestParameters {
-  conversationType: ConversationType.PERFORMANCE_FULL;
+  conversationType: ConversationType.PERFORMANCE;
   prompt: string;
   data: ExternalPerformanceAIConversationData;
 }
@@ -201,7 +201,7 @@ export class ConversationHandler {
       case ConversationType.STYLING: {
         return await this.#handleExternalStylingConversation(parameters.prompt, parameters.selector);
       }
-      case ConversationType.PERFORMANCE_FULL:
+      case ConversationType.PERFORMANCE:
         return await this.#handleExternalPerformanceConversation(parameters.prompt, parameters.data);
       case ConversationType.NETWORK:
         if (!parameters.requestUrl) {
@@ -348,10 +348,8 @@ export class ConversationHandler {
         agent = new FileAgent(options);
         break;
       }
-      case ConversationType.PERFORMANCE_FULL:
-      case ConversationType.PERFORMANCE_INSIGHT:
-      case ConversationType.PERFORMANCE_CALL_TREE: {
-        agent = new PerformanceAgent(options, conversationType);
+      case ConversationType.PERFORMANCE: {
+        agent = new PerformanceAgent(options);
         break;
       }
     }

front_end/models/ai_assistance/agents/PerformanceAgent.test.ts

Lines changed: 34 additions & 52 deletions
@@ -19,7 +19,6 @@ import * as Trace from '../../trace/trace.js';
 import {
   type ActionResponse,
   AICallTree,
-  ConversationType,
   PerformanceAgent,
   PerformanceTraceContext,
   PerformanceTraceFormatter,
@@ -39,11 +38,9 @@ describeWithEnvironment('PerformanceAgent', () => {
   describe('buildRequest', () => {
     it('builds a request with a model id', async () => {
       mockHostConfig('test model');
-      const agent = new PerformanceAgent(
-          {
-            aidaClient: {} as Host.AidaClient.AidaClient,
-          },
-          ConversationType.PERFORMANCE_CALL_TREE);
+      const agent = new PerformanceAgent({
+        aidaClient: {} as Host.AidaClient.AidaClient,
+      });
       assert.strictEqual(
           agent.buildRequest({text: 'test input'}, Host.AidaClient.Role.USER).options?.model_id,
           'test model',
@@ -52,11 +49,9 @@ describeWithEnvironment('PerformanceAgent', () => {

     it('builds a request with a temperature', async () => {
       mockHostConfig('test model', 1);
-      const agent = new PerformanceAgent(
-          {
-            aidaClient: {} as Host.AidaClient.AidaClient,
-          },
-          ConversationType.PERFORMANCE_CALL_TREE);
+      const agent = new PerformanceAgent({
+        aidaClient: {} as Host.AidaClient.AidaClient,
+      });
       assert.strictEqual(
           agent.buildRequest({text: 'test input'}, Host.AidaClient.Role.USER).options?.temperature,
           1,
@@ -66,12 +61,10 @@ describeWithEnvironment('PerformanceAgent', () => {
     it('structure matches the snapshot', async () => {
       mockHostConfig('test model');
       sinon.stub(crypto, 'randomUUID').returns('sessionId' as `${string}-${string}-${string}-${string}-${string}`);
-      const agent = new PerformanceAgent(
-          {
-            aidaClient: mockAidaClient([[{explanation: 'answer'}]]),
-            serverSideLoggingEnabled: true,
-          },
-          ConversationType.PERFORMANCE_CALL_TREE);
+      const agent = new PerformanceAgent({
+        aidaClient: mockAidaClient([[{explanation: 'answer'}]]),
+        serverSideLoggingEnabled: true,
+      });

       await Array.fromAsync(agent.run('question', {selected: null}));
       setUserAgentForTesting();
@@ -126,27 +119,20 @@ describeWithEnvironment('PerformanceAgent – call tree focus', () => {
     const aiCallTree = AICallTree.fromEvent(layoutEvt, parsedTrace);
     assert.exists(aiCallTree);

-    const agent = new PerformanceAgent(
-        {
-          aidaClient: mockAidaClient([[{
-            explanation: 'This is the answer',
-            metadata: {
-              rpcGlobalId: 123,
-            },
-          }]]),
+    const agent = new PerformanceAgent({
+      aidaClient: mockAidaClient([[{
+        explanation: 'This is the answer',
+        metadata: {
+          rpcGlobalId: 123,
         },
-        ConversationType.PERFORMANCE_CALL_TREE);
+      }]]),
+    });

     const context = PerformanceTraceContext.fromCallTree(aiCallTree);
     const responses = await Array.fromAsync(agent.run('test', {selected: context}));
-    const expectedData = '\n\n' +
-        `
-
-
-# Call tree:
-
-1;Task;3;;;2
-2;Layout;3;3;;;S`.trim();
+    const expectedData =
+        new PerformanceTraceFormatter(context.getItem(), new Trace.EventsSerializer.EventsSerializer())
+            .formatTraceSummary();

     assert.deepEqual(responses, [
       {
@@ -157,9 +143,9 @@ describeWithEnvironment('PerformanceAgent – call tree focus', () => {
       },
       {
         type: ResponseType.CONTEXT,
-        title: 'Analyzing call tree',
+        title: 'Analyzing trace',
         details: [
-          {title: 'Selected call tree', text: expectedData},
+          {title: 'Trace', text: expectedData},
         ],
       },
       {
@@ -190,11 +176,9 @@ describeWithEnvironment('PerformanceAgent – call tree focus', () => {

   describe('enhanceQuery', () => {
     it('does not send the serialized calltree again if it is a followup chat about the same calltree', async () => {
-      const agent = new PerformanceAgent(
-          {
-            aidaClient: {} as Host.AidaClient.AidaClient,
-          },
-          ConversationType.PERFORMANCE_CALL_TREE);
+      const agent = new PerformanceAgent({
+        aidaClient: {} as Host.AidaClient.AidaClient,
+      });

       const mockAiCallTree = {
         serialize: () => 'Mock call tree',
@@ -264,13 +248,19 @@ const FAKE_PARSED_TRACE = {
 } as unknown as Trace.TraceModel.ParsedTrace;

 function createAgentForInsightConversation(opts: {aidaClient?: Host.AidaClient.AidaClient} = {}) {
-  return new PerformanceAgent({aidaClient: opts.aidaClient ?? mockAidaClient()}, ConversationType.PERFORMANCE_INSIGHT);
+  return new PerformanceAgent({aidaClient: opts.aidaClient ?? mockAidaClient()});
 }

-describeWithEnvironment('PerformanceAgent – insight focus', () => {
+describeWithEnvironment('PerformanceAgent', () => {
+  it('uses the min and max bounds of the trace as the origin', async function() {
+    const parsedTrace = await TraceLoader.traceEngine(this, 'lcp-images.json.gz');
+    const context = PerformanceTraceContext.full(parsedTrace);
+    assert.strictEqual(context.getOrigin(), 'trace-658799706428-658804825864');
+  });
+
   it('outputs the right title for the selected insight', async () => {
     const context = PerformanceTraceContext.fromInsight(FAKE_PARSED_TRACE, FAKE_LCP_MODEL);
-    assert.strictEqual(context.getTitle(), 'Trace: www.example.com');
+    assert.strictEqual(context.getTitle(), 'Trace: www.example.com – LCP breakdown');
   });

   // See b/405054694 for context on why we do this.
@@ -559,11 +549,3 @@ code
     });
   });
 });
-
-describeWithEnvironment('PerformanceAgent – all focus', function() {
-  it('uses the min and max bounds of the trace as the origin', async function() {
-    const parsedTrace = await TraceLoader.traceEngine(this, 'lcp-images.json.gz');
-    const context = PerformanceTraceContext.full(parsedTrace);
-    assert.strictEqual(context.getOrigin(), 'trace-658799706428-658804825864');
-  });
-});

front_end/models/ai_assistance/agents/PerformanceAgent.ts

Lines changed: 34 additions & 65 deletions
@@ -11,7 +11,7 @@ import * as Root from '../../../core/root/root.js';
 import * as SDK from '../../../core/sdk/sdk.js';
 import * as Tracing from '../../../services/tracing/tracing.js';
 import * as Trace from '../../trace/trace.js';
-import type {ConversationType} from '../AiHistoryStorage.js';
+import {ConversationType} from '../AiHistoryStorage.js';
 import {
   PerformanceInsightFormatter,
   TraceEventFormatter,
@@ -22,7 +22,6 @@ import {AICallTree} from '../performance/AICallTree.js';
 import {AgentFocus} from '../performance/AIContext.js';

 import {
-  type AgentOptions,
   AiAgent,
   type ContextResponse,
   ConversationContext,
@@ -38,7 +37,6 @@ const UIStringsNotTranslated = {
   *@description Shown when the agent is investigating a trace
   */
  analyzingTrace: 'Analyzing trace',
-  analyzingCallTree: 'Analyzing call tree',
  /**
   * @description Shown when the agent is investigating network activity
   */
@@ -196,7 +194,15 @@ export class PerformanceTraceContext extends ConversationContext<AgentFocus> {
       url = new URL(focus.parsedTrace.data.Meta.mainFrameURL);
     }

-    return `Trace: ${url.hostname}`;
+    const parts = [`Trace: ${url.hostname}`];
+    if (focus.insight) {
+      parts.push(focus.insight.title);
+    }
+    if (focus.callTree) {
+      const node = focus.callTree.selectedNode ?? focus.callTree.rootNode;
+      parts.push(Trace.Name.forEntry(node.event));
+    }
+    return parts.join(' – ');
   }

   /**
@@ -206,7 +212,7 @@ export class PerformanceTraceContext extends ConversationContext<AgentFocus> {
   override async getSuggestions(): Promise<[ConversationSuggestion, ...ConversationSuggestion[]]|undefined> {
     const focus = this.#focus.data;

-    if (focus.type !== 'insight') {
+    if (!focus.insight) {
       return;
     }

@@ -217,30 +223,16 @@ export class PerformanceTraceContext extends ConversationContext<AgentFocus> {
 // 16k Tokens * ~4 char per token.
 const MAX_FUNCTION_RESULT_BYTE_LENGTH = 16384 * 4;

-/**
- * Union of all the performance conversation types, which are all implemented by this file.
- * This temporary until all Performance Panel AI features use the "Full" type. go/chrome-devtools:more-powerful-performance-agent-design
- */
-type PerformanceConversationType =
-    ConversationType.PERFORMANCE_FULL|ConversationType.PERFORMANCE_CALL_TREE|ConversationType.PERFORMANCE_INSIGHT;
-
 /**
  * One agent instance handles one conversation. Create a new agent
  * instance for a new conversation.
  */
 export class PerformanceAgent extends AiAgent<AgentFocus> {
-  // TODO: would make more sense on AgentOptions
-  #conversationType: PerformanceConversationType;
   #formatter: PerformanceTraceFormatter|null = null;
   #lastInsightForEnhancedQuery: Trace.Insights.Types.InsightModel|undefined;
   #eventsSerializer = new Trace.EventsSerializer.EventsSerializer();
   #lastFocusHandledForContextDetails: AgentFocus|null = null;

-  constructor(opts: AgentOptions, conversationType: PerformanceConversationType) {
-    super(opts);
-    this.#conversationType = conversationType;
-  }
-
   /**
    * Cache of all function calls made by the agent. This allows us to include (as a
    * fact) every function call to conversation requests, allowing the AI to access
@@ -285,7 +277,7 @@ export class PerformanceAgent extends AiAgent<AgentFocus> {
   }

   getConversationType(): ConversationType {
-    return this.#conversationType;
+    return ConversationType.PERFORMANCE;
   }

   #lookupEvent(key: Trace.Types.File.SerializableKey): Trace.Types.Events.Event|null {
@@ -318,31 +310,16 @@ export class PerformanceAgent extends AiAgent<AgentFocus> {

     this.#lastFocusHandledForContextDetails = focus;

-    if (focus.data.type === 'full' || focus.data.type === 'insight') {
-      yield {
-        type: ResponseType.CONTEXT,
-        title: lockedString(UIStringsNotTranslated.analyzingTrace),
-        details: [
-          {
-            title: 'Trace',
-            text: this.#formatter?.formatTraceSummary() ?? '',
-          },
-        ],
-      };
-    } else if (focus.data.type === 'call-tree') {
-      yield {
-        type: ResponseType.CONTEXT,
-        title: lockedString(UIStringsNotTranslated.analyzingCallTree),
-        details: [
-          {
-            title: 'Selected call tree',
-            text: focus.data.callTree.serialize(),
-          },
-        ],
-      };
-    } else {
-      Platform.assertNever(focus.data, 'Unknown agent focus');
-    }
+    yield {
+      type: ResponseType.CONTEXT,
+      title: lockedString(UIStringsNotTranslated.analyzingTrace),
+      details: [
+        {
+          title: 'Trace',
+          text: this.#formatter?.formatTraceSummary() ?? '',
+        },
+      ],
+    };
   }

   #callTreeContextSet = new WeakSet();
@@ -378,12 +355,9 @@ export class PerformanceAgent extends AiAgent<AgentFocus> {
     this.#declareFunctions(context);

     const focus = context.getItem();
+    const selected: string[] = [];

-    if (focus.data.type === 'full') {
-      return query;
-    }
-
-    if (focus.data.type === 'call-tree') {
+    if (focus.data.callTree) {
       // If this is a followup chat about the same call tree, don't include the call tree serialization again.
       // We don't need to repeat it and we'd rather have more the context window space.
       let contextString = '';
@@ -392,17 +366,12 @@
         this.#callTreeContextSet.add(focus.data.callTree);
       }

-      if (!contextString) {
-        return query;
+      if (contextString) {
+        selected.push(`User selected the following call tree:\n\n${contextString}\n\n`);
       }
-
-      let enhancedQuery = '';
-      enhancedQuery += `User selected the following call tree:\n\n${contextString}\n\n`;
-      enhancedQuery += `# User query\n\n${query}`;
-      return enhancedQuery;
     }

-    if (focus.data.type === 'insight') {
+    if (focus.data.insight) {
       // We only need to add Insight info to a prompt when the context changes. For example:
       // User clicks Insight A. We need to send info on Insight A with the prompt.
       // User asks follow up question. We do not need to resend Insight A with the prompt.
@@ -411,17 +380,17 @@
       const includeInsightInfo = focus.data.insight !== this.#lastInsightForEnhancedQuery;
       this.#lastInsightForEnhancedQuery = focus.data.insight;

-      if (!includeInsightInfo) {
-        return query;
+      if (includeInsightInfo) {
+        selected.push(`User selected the ${focus.data.insight.insightKey} insight.\n\n`);
       }
+    }

-      let enhancedQuery = '';
-      enhancedQuery += `User selected the ${focus.data.insight.insightKey} insight.\n\n`;
-      enhancedQuery += `# User query\n\n${query}`;
-      return enhancedQuery;
+    if (!selected.length) {
+      return query;
     }

-    Platform.assertNever(focus.data, 'Unknown agent focus');
+    selected.push(`# User query\n\n${query}`);
+    return selected.join('');
   }

   override async * run(initialQuery: string, options: {
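
For reference, here is a standalone sketch of how the rewritten enhanceQuery assembles the prompt from the optional selections. It is a simplification of the code in the diff above: the bookkeeping that avoids resending the same call tree (#callTreeContextSet) or the same insight (#lastInsightForEnhancedQuery) is omitted, and the function and parameter names are invented for illustration.

// Simplified sketch of the prompt assembly shown in the diff above.
function buildEnhancedQuery(query: string, callTreeContext?: string, insightKey?: string): string {
  const selected: string[] = [];
  if (callTreeContext) {
    selected.push(`User selected the following call tree:\n\n${callTreeContext}\n\n`);
  }
  if (insightKey) {
    selected.push(`User selected the ${insightKey} insight.\n\n`);
  }
  if (!selected.length) {
    return query;  // nothing selected: the query is sent unchanged
  }
  selected.push(`# User query\n\n${query}`);
  return selected.join('');
}

// Example (placeholder insight key):
// buildEnhancedQuery('Why is LCP slow?', undefined, 'SomeInsight')
//   -> 'User selected the SomeInsight insight.\n\n# User query\n\nWhy is LCP slow?'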
