2 changes: 2 additions & 0 deletions src/extension/extension/vscode-node/contributions.ts
@@ -35,6 +35,7 @@ import { OnboardTerminalTestsContribution } from '../../onboardDebug/vscode-node
import { DebugCommandsContribution } from '../../prompt/vscode-node/debugCommands';
import { RenameSuggestionsContrib } from '../../prompt/vscode-node/renameSuggestions';
import { PromptFileContextContribution } from '../../promptFileContext/vscode-node/promptFileContextService';
import { PromptSaveCommands } from '../../promptSave/vscode-node/commands';
import { RelatedFilesProviderContribution } from '../../relatedFiles/vscode-node/relatedFiles.contribution';
import { ChatReplayContribution } from '../../replay/vscode-node/chatReplayContrib';
import { SearchPanelCommands } from '../../search/vscode-node/commands';
@@ -108,5 +109,6 @@ export const vscodeNodeChatContributions: IExtensionContributionFactory[] = [
asContributionFactory(RelatedFilesProviderContribution),
asContributionFactory(BYOKContrib),
asContributionFactory(McpSetupCommands),
asContributionFactory(PromptSaveCommands),
newWorkspaceContribution,
];
49 changes: 49 additions & 0 deletions src/extension/promptSave/common/types.ts
@@ -0,0 +1,49 @@
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/

/**
* Command names for prompt save cross-repository communication
*/
export const PROMPT_SAVE_CHECK_COMMAND = 'github.copilot.chat.prompt.save.check';
export const PROMPT_SAVE_ANALYZE_COMMAND = 'github.copilot.chat.prompt.save.analyze';

/**
* Input arguments for conversation analysis
*/
export interface IAnalyzeConversationArgs {
/**
* Array of conversation turns (user/assistant message pairs)
*/
readonly turns: Array<{
readonly role: 'user' | 'assistant';
readonly content: string;
}>;

/**
* Optional: The current user query that triggered the save
*/
readonly currentQuery?: string;
}

/**
* Output from prompt save analysis
*/
export interface IPromptTaskSave {
/**
* Suggested filename in kebab-case (without .prompt.md extension)
* Example: "generate-unit-tests"
*/
readonly title: string;

/**
* Brief description of the prompt's purpose (1-2 sentences)
*/
readonly description: string;

/**
* Generalized prompt text that can be reused for similar tasks
*/
readonly prompt: string;
}
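Note: these two command IDs are the cross-repository surface of the feature, so a caller-side view may help. Below is a minimal sketch, not part of this PR, using a hypothetical helper named suggestPromptFile on the consuming side (for example, VS Code core); only vscode.commands.executeCommand and the argument/result shapes defined above are taken from the change itself.

// Illustrative caller-side sketch (not part of this PR). suggestPromptFile is a
// hypothetical helper; its argument and result shapes mirror
// IAnalyzeConversationArgs and IPromptTaskSave from common/types.ts.
import * as vscode from 'vscode';

interface PromptTaskSave { title: string; description: string; prompt: string }

async function suggestPromptFile(
	turns: Array<{ role: 'user' | 'assistant'; content: string }>,
	currentQuery?: string
): Promise<PromptTaskSave | undefined> {
	// 1. Ask the extension whether LLM-backed analysis is available at all.
	const available = await vscode.commands.executeCommand<boolean>('github.copilot.chat.prompt.save.check');
	if (!available) {
		return undefined; // caller falls back to a plain save
	}
	// 2. Request the analysis; this resolves to an IPromptTaskSave-shaped object,
	//    or undefined if the analysis failed.
	return vscode.commands.executeCommand<PromptTaskSave | undefined>(
		'github.copilot.chat.prompt.save.analyze',
		{ turns, currentQuery }
	);
}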
151 changes: 151 additions & 0 deletions src/extension/promptSave/vscode-node/commands.ts
@@ -0,0 +1,151 @@
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/

import * as vscode from 'vscode';
import { ChatFetchResponseType, ChatLocation } from '../../../platform/chat/common/commonTypes';
import { IEndpointProvider } from '../../../platform/endpoint/common/endpointProvider';
import { ILogService } from '../../../platform/log/common/logService';
import { ITelemetryService } from '../../../platform/telemetry/common/telemetry';
import { CancellationTokenSource } from '../../../util/vs/base/common/cancellation';
import { Disposable } from '../../../util/vs/base/common/lifecycle';
import { IInstantiationService } from '../../../util/vs/platform/instantiation/common/instantiation';
import { Turn } from '../../prompt/common/conversation';
import { PromptRenderer } from '../../prompts/node/base/promptRenderer';
import { IAnalyzeConversationArgs, IPromptTaskSave, PROMPT_SAVE_ANALYZE_COMMAND, PROMPT_SAVE_CHECK_COMMAND } from '../common/types';
import { PromptSavePrompt } from './promptSavePrompt';

export { PROMPT_SAVE_ANALYZE_COMMAND, PROMPT_SAVE_CHECK_COMMAND } from '../common/types';

export class PromptSaveCommands extends Disposable {
constructor(
@ITelemetryService private readonly telemetryService: ITelemetryService,
@IInstantiationService private readonly instantiationService: IInstantiationService,
@IEndpointProvider private readonly endpointProvider: IEndpointProvider,
@ILogService private readonly logService: ILogService,
) {
super();

this._register(vscode.commands.registerCommand(
PROMPT_SAVE_CHECK_COMMAND,
() => this.checkAvailability()
));

this._register(vscode.commands.registerCommand(
PROMPT_SAVE_ANALYZE_COMMAND,
(args: IAnalyzeConversationArgs) => this.analyzeConversation(args)
));
}

private async checkAvailability(): Promise<boolean> {
// Check if LLM analysis is available
try {
const models = await vscode.lm.selectChatModels({ family: 'gpt-4o' });
return models.length > 0;
} catch {
return false;
}
}

private async analyzeConversation(args: IAnalyzeConversationArgs): Promise<IPromptTaskSave | undefined> {
const startTime = Date.now();

try {
// Convert input turns to conversation Turn objects
const history: Turn[] = args.turns.map(turn => {
return new Turn(
undefined,
{
type: turn.role === 'user' ? 'user' : 'model',
message: turn.content
}
);
});

// Get the chat endpoint used for conversation analysis
const endpoint = await this.endpointProvider.getChatEndpoint('gpt-4.1');

// Render the prompt using PromptRenderer to preserve DI and endpoint wiring
const renderer = PromptRenderer.create(
this.instantiationService,
endpoint,
PromptSavePrompt,
{ history, currentQuery: args.currentQuery }
);
const { messages } = await renderer.render();

// Create cancellation token
const cts = new CancellationTokenSource();

// Send request to LLM
const response = await endpoint.makeChatRequest(
'prompt-save',
messages,
undefined,
cts.token,
ChatLocation.Panel,
undefined,
undefined,
false
);

if (cts.token.isCancellationRequested) {
return undefined;
}

if (response.type !== ChatFetchResponseType.Success) {
throw new Error(`Chat request failed: ${response.reason}`);
}

// Extract JSON from markdown code block
const analysis = this.parseAnalysisResponse(response.value);

this.telemetryService.sendMSFTTelemetryEvent('chat.promptSave.success', {
durationMs: String(Date.now() - startTime),
turnCount: String(args.turns.length),
});

return analysis;

} catch (error) {
this.logService.error(`Prompt save analysis failed: ${error instanceof Error ? error.message : String(error)}`);

this.telemetryService.sendMSFTTelemetryEvent('chat.promptSave.error', {
durationMs: String(Date.now() - startTime),
error: error instanceof Error ? error.message : String(error),
});

// Return undefined on error - VS Code will fall back to simple save
return undefined;
}
}

private parseAnalysisResponse(response: string): IPromptTaskSave {
// Extract JSON from markdown code block
const jsonMatch = response.match(/```json\s*\n([\s\S]*?)\n```/);
if (!jsonMatch) {
throw new Error('No JSON code block found in response');
}

const parsed = JSON.parse(jsonMatch[1]);

// Validate required fields
if (!parsed.title || !parsed.description || !parsed.prompt) {
throw new Error('Invalid analysis response: missing required fields');
}

// Sanitize title to ensure it's valid kebab-case
const sanitizedTitle = parsed.title
.toLowerCase()
.replace(/[^a-z0-9-]/g, '-')
.replace(/-+/g, '-')
.replace(/^-|-$/g, '');

return {
title: sanitizedTitle,
description: parsed.description,
prompt: parsed.prompt
};
}
}
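For reference, a small illustration of what parseAnalysisResponse expects from the model and what the title sanitization chain yields. The reply text and field values below are invented examples; only the extraction regex and the replace chain mirror the code above.

// Illustrative only (not part of this PR). All values are made up.
const exampleReply = [
	'```json',
	'{',
	'  "title": "Generate Unit Tests!",',
	'  "description": "Create unit tests for the selected code.",',
	'  "prompt": "Write unit tests that cover the main paths of the selected code."',
	'}',
	'```',
].join('\n');

// The extraction regex pulls the JSON payload out of the fenced block.
const payload = exampleReply.match(/```json\s*\n([\s\S]*?)\n```/)![1];

// The replace chain then normalizes the title to kebab-case:
// "Generate Unit Tests!" -> "generate-unit-tests"
const kebabTitle = (JSON.parse(payload).title as string)
	.toLowerCase()
	.replace(/[^a-z0-9-]/g, '-')
	.replace(/-+/g, '-')
	.replace(/^-|-$/g, '');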
95 changes: 95 additions & 0 deletions src/extension/promptSave/vscode-node/promptSavePrompt.tsx
@@ -0,0 +1,95 @@
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/

import { BasePromptElementProps, PromptElement, SystemMessage, TextChunk, UserMessage } from '@vscode/prompt-tsx';
import { Turn } from '../../prompt/common/conversation';
import { InstructionMessage } from '../../prompts/node/base/instructionMessage';
import { ResponseTranslationRules } from '../../prompts/node/base/responseTranslationRules';
import { SafetyRules } from '../../prompts/node/base/safetyRules';
import { Tag } from '../../prompts/node/base/tag';
import { HistoryWithInstructions } from '../../prompts/node/panel/conversationHistory';

export interface PromptSavePromptProps extends BasePromptElementProps {
/**
* The conversation history to analyze
*/
readonly history: readonly Turn[];

/**
* Optional: The user's final query that triggered the save
*/
readonly currentQuery?: string;
}

/**
* Prompt for analyzing chat conversations and extracting reusable prompt tasks.
* Used by the /save command to generate prompt file metadata.
*/
export class PromptSavePrompt extends PromptElement<PromptSavePromptProps> {
override render() {
return (
<>
<SystemMessage priority={1000}>
You are an expert at analyzing chat conversations and extracting reusable prompt patterns. Your task is to analyze a conversation between a user and an AI assistant, then create a generalized, reusable prompt task definition.<br />
<SafetyRules />
</SystemMessage>
<HistoryWithInstructions historyPriority={800} passPriority history={this.props.history}>
<InstructionMessage priority={1000}>
<PromptSaveRules />
<ResponseTranslationRules />
</InstructionMessage>
</HistoryWithInstructions>
<UserMessage priority={900}>
{this.props.currentQuery && <>Current request: {this.props.currentQuery}<br /><br /></>}
Analyze the conversation above and extract a reusable prompt task. Return your analysis as a JSON object wrapped in a markdown code block with triple backticks (```json).<br />
<br />
The JSON object must match this structure:<br />
<Tag name='schema'>
<TextChunk breakOnWhitespace>
{`{
"title": "kebab-case-filename",
"description": "Brief description of the prompt's purpose (1-2 sentences)",
"prompt": "Generalized prompt text that can be reused for similar tasks"
}`}
</TextChunk>
</Tag>
</UserMessage>
</>
);
}
}

class PromptSaveRules extends PromptElement {
render() {
return (
<>
Think step by step:<br />
1. Review the conversation to identify the user's primary goal or task pattern<br />
2. Extract the core intent, removing conversation-specific details (e.g., specific file names, variable names, or project-specific context)<br />
3. Identify any recurring instructions, constraints, or requirements that define how the task should be approached<br />
4. Generalize the task into a reusable prompt that could apply to similar scenarios<br />
5. Create a very concise action-oriented title in kebab-case format that will be used for the slash command (1-3 words, e.g., "generate-unit-tests", "refactor-for-performance", "explain-api-design", etc.)<br />
6. Write a brief description (1 sentence, max 15 words) explaining the goal of the prompt<br />
7. Craft the generalized multi-line markdown text prompt, using placeholders where appropriate (e.g., "the selected code", "the current file", "the specified functionality")<br />
<br />
Guidelines for creating the prompt:<br />
- Focus on the pattern of interaction, not specific implementation details<br />
- Preserve important constraints or requirements (e.g., "follow test-driven development", "maintain backward compatibility")<br />
- Use general terms rather than specific names (e.g., "the function" instead of "calculateTotal")<br />
- Keep the prompt concise but complete (reflecting the complexity of the task) - it should provide clear direction without unnecessary verbosity<br />
- The prompt should work as a standalone instruction that captures the essence of the conversation's goal<br />
<br />
Example good titles:<br />
- "review-pr-changes"<br />
- "add-error-handling"<br />
- "write-api-docs"<br />
- "optimize-database-query"<br />
- "migrate-to-typescript"<br />
<br />
Return ONLY the JSON object in a markdown code block. Do not include explanations or additional prose.
</>
);
}
}