Skip to content

Commit 0778dc0

Browse files
committed
feat: AI conversation memory, on-demand AI toggle, fix config bug
- Debug AI chat now includes message history for multi-turn conversations
- Public debug endpoint stores messages and applies PII redaction
- Add PATCH endpoints to enable AI on any debug session (auth + token-based)
- Debug viewer shows Ask AI button on all sessions, enables on click
- Fix aiAutoAnalyze not returned by getConfig, consolidate config update
- Add llm-product-discipline Cursor rule
1 parent 2c113ed commit 0778dc0

File tree

5 files changed

+216
-18
lines changed

5 files changed

+216
-18
lines changed
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
---
description: Pre-implementation gate that forces product-value, architecture, and cleanup checks before coding any new feature.
alwaysApply: true
---

# LLM Product Discipline

## Purpose
Use this skill before implementing any new feature, enhancement, or prototype.

The goal is to prevent:
- shipping low-value features
- architecture decay
- hacky iterations
- confusing prototypes with real product thinking

## Trigger
Use this skill when:
- the user asks to add a feature
- the user asks for a prototype
- the user asks for a quick implementation
- a change touches architecture or existing flows
- the request sounds easy but product value is unclear

## Required behavior
Before writing code, do ALL of the following:

### Step 1: Restate the request
Summarize the requested feature in 1-2 sentences.

### Step 2: Evaluate product value
Answer:
- Who is this for?
- What problem does it solve?
- How painful/frequent is the problem?
- What happens if we do not build it?
- Is this core, leverage, or convenience?

If the request is weak, say so clearly.

### Step 3: Evaluate architecture
Answer:
- Does this fit the current design?
- Would implementing it require hacks, duplication, or awkward exceptions?
- Should the design be refactored first?

If refactor is needed, recommend refactor before implementation.

### Step 4: Evaluate whether cleanup is higher ROI
Answer:
- Is fixing or simplifying existing code higher value than adding this feature?
- What existing issues should be cleaned first?

### Step 5: Give a decision
Choose one:
- BUILD NOW
- REFACTOR FIRST
- DO NOT BUILD YET

Explain why.

## Output format
Always respond in this structure before coding:

### Feature Summary
...

### Value Check
...

### Architecture Check
...

### Cleanup Check
...

### Decision
BUILD NOW / REFACTOR FIRST / DO NOT BUILD YET

### If Building
Describe the smallest correct implementation.

## Rules
- Do not jump straight into code for feature requests.
- Do not treat "easy to code" as evidence of value.
- Do not patch bad architecture with LLM-generated hacks.
- Prefer fewer, higher-conviction features.
- Leave the system cleaner than you found it.

apps/api/src/ai/ai.controller.ts

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -88,15 +88,10 @@ export class AIController {
8888
}
8989
}
9090

-    await this.aiService.updateConfig(workspaceId, body);
-
-    // Also update autoAnalyze if provided
-    if (body.autoAnalyze !== undefined) {
-      await this.prisma.workspace.update({
-        where: { id: workspaceId },
-        data: { aiAutoAnalyze: body.autoAnalyze },
-      });
-    }
+    await this.aiService.updateConfig(workspaceId, {
+      ...body,
+      autoAnalyze: body.autoAnalyze,
+    });
10095

10196
return { success: true };
10297
}

apps/api/src/ai/ai.service.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ export interface AIConfig {
77
model: string;
88
apiKey?: string;
99
ollamaUrl?: string;
10+
autoAnalyze?: boolean;
1011
}
1112

1213
export interface AnalysisRequest {
@@ -58,6 +59,7 @@ export class AIService {
5859
aiModel: true,
5960
aiApiKey: true,
6061
aiOllamaUrl: true,
62+
aiAutoAnalyze: true,
6163
},
6264
});
6365

@@ -68,6 +70,7 @@ export class AIService {
6870
model: workspace.aiModel || this.getDefaultModel(workspace.aiProvider),
6971
apiKey: workspace.aiApiKey ? decryptField(workspace.aiApiKey) : undefined,
7072
ollamaUrl: workspace.aiOllamaUrl || this.defaultOllamaUrl,
73+
autoAnalyze: workspace.aiAutoAnalyze,
7174
};
7275
}
7376

@@ -82,6 +85,7 @@ export class AIService {
8285
aiModel: config.model,
8386
aiApiKey: config.apiKey != null ? encryptField(config.apiKey) : config.apiKey,
8487
aiOllamaUrl: config.ollamaUrl,
88+
...(config.autoAnalyze !== undefined && { aiAutoAnalyze: config.autoAnalyze }),
8589
},
8690
});
8791
}

apps/api/src/debug/debug.controller.ts

Lines changed: 83 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ import {
22
Controller,
33
Get,
44
Post,
5+
Patch,
56
Delete,
67
Param,
78
Body,
@@ -15,6 +16,7 @@ import {
1516
Optional,
1617
} from '@nestjs/common';
1718
import { DebugService } from './debug.service';
19+
import { PrismaService } from '../prisma/prisma.service';
1820
import { AuthGuard } from '../auth/auth.guard';
1921
import { CombinedAuthGuard } from '../auth/combined-auth.guard';
2022
import { AIService } from '../ai/ai.service';
@@ -35,6 +37,7 @@ interface CreateSessionDto {
3537
export class DebugController {
3638
constructor(
3739
private debugService: DebugService,
40+
private prisma: PrismaService,
3841
@Optional() @Inject(forwardRef(() => AIService))
3942
private aiService?: AIService,
4043
) {}
@@ -135,6 +138,51 @@ export class DebugController {
135138
return { success: true };
136139
}
137140

141+
/**
142+
* Toggle AI on a debug session
143+
*/
144+
@Patch('sessions/:id/ai')
145+
@UseGuards(AuthGuard)
146+
async toggleAI(
147+
@Param('id') id: string,
148+
@Body() body: { enabled: boolean },
149+
) {
150+
const session = await this.debugService.getSession(id);
151+
if (!session) {
152+
throw new NotFoundException('Session not found');
153+
}
154+
155+
await this.prisma.debugSession.update({
156+
where: { id },
157+
data: { aiEnabled: body.enabled },
158+
});
159+
160+
return { success: true, aiEnabled: body.enabled };
161+
}
162+
163+
/**
164+
* Toggle AI on a debug session (public, token-based)
165+
*/
166+
@Patch('public/:token/ai/enable')
167+
@UseGuards(RateLimitGuard)
168+
@RateLimit('debug')
169+
async toggleAIPublic(
170+
@Param('token') token: string,
171+
@Body() body: { enabled: boolean },
172+
) {
173+
const session = await this.debugService.getSessionByToken(token);
174+
if (!session) {
175+
throw new NotFoundException('Session not found or expired');
176+
}
177+
178+
await this.prisma.debugSession.update({
179+
where: { id: session.id },
180+
data: { aiEnabled: body.enabled },
181+
});
182+
183+
return { success: true, aiEnabled: body.enabled };
184+
}
185+
138186
/**
139187
* End a debug session by token (public - requires session token to prevent unauthorized termination)
140188
*/
@@ -567,14 +615,45 @@ export class DebugController {
567615

568616
// Build context from packet data
569617
const packets = body.packetContext || [];
570-
618+
619+
// Load conversation history for multi-turn context
620+
const history = await this.prisma.debugAIMessage.findMany({
621+
where: { sessionId: session.id },
622+
orderBy: { createdAt: 'asc' },
623+
take: 20,
624+
});
625+
626+
const ai = this.aiService!;
627+
const shouldRedact = config.provider !== 'ollama';
628+
const messages = [
629+
...history.map(m => ({
630+
role: m.role as 'user' | 'assistant',
631+
content: shouldRedact ? ai.redactPII(m.content) : m.content,
632+
})),
633+
{ role: 'user' as const, content: shouldRedact ? ai.redactPII(body.message) : body.message },
634+
];
635+
571636
try {
572-
const response = await this.aiService.chat(
637+
const response = await ai.chat(
573638
config,
574-
[{ role: 'user', content: body.message }],
575-
{ packets },
639+
messages,
640+
{ packets: shouldRedact
641+
? packets.map((p: any) => ({
642+
...p,
643+
parsed: p.parsed ? JSON.parse(ai.redactPII(JSON.stringify(p.parsed))) : undefined,
644+
}))
645+
: packets,
646+
},
576647
);
577648

649+
// Store messages for conversation history
650+
await this.prisma.debugAIMessage.createMany({
651+
data: [
652+
{ sessionId: session.id, role: 'user', content: body.message },
653+
{ sessionId: session.id, role: 'assistant', content: response.content, tokensUsed: response.tokensUsed },
654+
],
655+
});
656+
578657
return {
579658
response: response.content,
580659
tokensUsed: response.tokensUsed,

apps/web/pages/debug/[token].vue

Lines changed: 37 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -107,15 +107,19 @@
107107
</button>
108108
<!-- AI Chat Toggle -->
109109
<button
110-
v-if="session?.aiEnabled"
111-
@click="showAIChat = !showAIChat"
110+
@click="toggleAIChat"
111+
:disabled="enablingAI"
112112
class="px-2.5 sm:px-3 py-1.5 text-xs sm:text-sm font-medium rounded-lg transition-all flex items-center gap-1.5 sm:gap-2"
113113
:class="showAIChat ? 'bg-blue-300 text-black' : 'bg-blue-300/10 border border-blue-300/10 text-blue-300 hover:bg-blue-300/20'"
114114
>
115-
<svg class="w-3.5 h-3.5 sm:w-4 sm:h-4" fill="currentColor" viewBox="0 0 20 20">
115+
<svg v-if="enablingAI" class="animate-spin w-3.5 h-3.5 sm:w-4 sm:h-4" viewBox="0 0 24 24">
116+
<circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4" fill="none"></circle>
117+
<path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path>
118+
</svg>
119+
<svg v-else class="w-3.5 h-3.5 sm:w-4 sm:h-4" fill="currentColor" viewBox="0 0 20 20">
116120
<path d="M10 2a1 1 0 011 1v1.323l3.954 1.582 1.599-.8a1 1 0 01.894 1.79l-1.233.616 1.738 5.42a1 1 0 01-.285 1.05A3.989 3.989 0 0115 15a3.989 3.989 0 01-2.667-1.019 1 1 0 01-.285-1.05l1.715-5.349L11 6.477V16h2a1 1 0 110 2H7a1 1 0 110-2h2V6.477L6.237 7.582l1.715 5.349a1 1 0 01-.285 1.05A3.989 3.989 0 015 15a3.989 3.989 0 01-2.667-1.019 1 1 0 01-.285-1.05l1.738-5.42-1.233-.617a1 1 0 01.894-1.788l1.599.799L9 4.323V3a1 1 0 011-1z"/>
117121
</svg>
118-
<span class="hidden sm:inline">{{ showAIChat ? 'Hide AI' : 'Ask AI' }}</span>
122+
<span class="hidden sm:inline">{{ enablingAI ? 'Enabling...' : showAIChat ? 'Hide AI' : 'Ask AI' }}</span>
119123
</button>
120124
</div>
121125
</div>
@@ -534,7 +538,7 @@
534538
</div>
535539

536540
<!-- AI Chat Panel -->
537-
<div v-if="session?.aiEnabled && showAIChat" class="mt-6">
541+
<div v-if="aiEnabled && showAIChat" class="mt-6">
538542
<div class="bg-gray-500/5 border border-gray-500/10 rounded-xl overflow-hidden">
539543
<!-- Chat Header -->
540544
<div class="border-b border-gray-500/10 p-4 flex items-center justify-between">
@@ -736,6 +740,34 @@ const showAIChat = ref(false);
736740
const aiInput = ref('');
737741
const aiMessages = ref<Array<{ role: 'user' | 'assistant'; content: string }>>([]);
738742
const aiThinking = ref(false);
743+
const enablingAI = ref(false);
744+
const aiEnabled = ref(false);
745+
746+
watch(() => session.value?.aiEnabled, (val) => {
747+
if (val !== undefined) aiEnabled.value = val;
748+
}, { immediate: true });
749+
750+
const toggleAIChat = async () => {
751+
if (!aiEnabled.value) {
752+
enablingAI.value = true;
753+
try {
754+
const token = route.params.token as string;
755+
const res = await fetch(`${config.public.apiUrl}/v1/debug/public/${token}/ai/enable`, {
756+
method: 'PATCH',
757+
headers: { 'Content-Type': 'application/json' },
758+
body: JSON.stringify({ enabled: true }),
759+
});
760+
if (res.ok) {
761+
aiEnabled.value = true;
762+
showAIChat.value = true;
763+
}
764+
} finally {
765+
enablingAI.value = false;
766+
}
767+
} else {
768+
showAIChat.value = !showAIChat.value;
769+
}
770+
};
739771
740772
// Close tunnel state
741773
const showCloseModal = ref(false);

0 commit comments

Comments (0)