diff --git a/report-app/report-server.ts b/report-app/report-server.ts
index dd2ef9a..4bcd0e6 100644
--- a/report-app/report-server.ts
+++ b/report-app/report-server.ts
@@ -10,7 +10,7 @@ import {fileURLToPath} from 'node:url';
 import {chatWithReportAI} from '../runner/reporting/report-ai-chat';
 import {convertV2ReportToV3Report} from '../runner/reporting/migrations/v2_to_v3';
 import {FetchedLocalReports, fetchReportsFromDisk} from '../runner/reporting/report-local-disk';
-import {AiChatRequest, RunInfo} from '../runner/shared-interfaces';
+import {AiChatRequest, AIConfigState, RunInfo} from '../runner/shared-interfaces';
 
 // This will result in a lot of loading and would slow down the serving,
 // so it's loaded lazily below.
@@ -88,19 +88,42 @@ app.post('/api/reports/:id/chat', async (req, res) => {
     return;
   }
 
-  const {prompt, pastMessages, model} = req.body as AiChatRequest;
-  const assessments = reports.flatMap(run => run.results);
-  const abortController = new AbortController();
-  const summary = await chatWithReportAI(
-    await (llm ?? getOrCreateGenkitLlmRunner()),
-    prompt,
-    abortController.signal,
-    assessments,
-    pastMessages,
-    model,
-  );
-
-  res.json(summary);
+  try {
+    const {prompt, pastMessages, model} = req.body as AiChatRequest;
+    const assessments = reports.flatMap(run => run.results);
+    const abortController = new AbortController();
+    const summary = await chatWithReportAI(
+      await (llm ?? getOrCreateGenkitLlmRunner()),
+      prompt,
+      abortController.signal,
+      assessments,
+      pastMessages,
+      model,
+    );
+    res.json(summary);
+  } catch (e) {
+    console.error(e);
+    if (e instanceof Error) {
+      console.error(e.stack);
+    }
+    res.status(500);
+    res.end(`Unexpected error. See terminal logs.`);
+  }
+});
+
+app.get('/api/ai-config-state', async (req, res) => {
+  try {
+    const llm = await getOrCreateGenkitLlmRunner();
+    return res.json({
+      configuredModels: llm.getSupportedModelsWithAPIKey(),
+    } satisfies AIConfigState);
+  } catch (e) {
+    console.error('Could not instantiate LLM instance. Error:', e);
+    if (e instanceof Error) {
+      console.error(e.stack);
+    }
+    return res.json({configuredModels: []});
+  }
 });
 
 app.use(
diff --git a/report-app/src/app/pages/report-viewer/report-viewer.html b/report-app/src/app/pages/report-viewer/report-viewer.html
index d3c2967..4b9d318 100644
--- a/report-app/src/app/pages/report-viewer/report-viewer.html
+++ b/report-app/src/app/pages/report-viewer/report-viewer.html
@@ -173,15 +173,17 @@
 <!-- … (element markup lost in extraction) -->
   Repair System Prompt
 <!-- … -->
 @if (report.details.summary.aiSummary !== undefined) {
-  <!-- <app-ai-assistant …> (attributes elided in extraction) -->
-  <!-- … -->
+  @defer (on interaction(aiAssistButton)) {
+    <!-- <app-ai-assistant …> (attributes elided in extraction) -->
+    <!-- … -->
+  }
 }
 @if (missingDeps().length > 0) {
@@ -346,6 +348,8 @@
+  <!-- … (markup elided; likely the #aiAssistButton trigger element) -->
+  <!-- … -->
   }}%)
diff --git a/report-app/src/app/shared/ai-assistant/ai-assistant.html b/report-app/src/app/shared/ai-assistant/ai-assistant.html
index d9df3c6..2f2ae63 100644
--- a/report-app/src/app/shared/ai-assistant/ai-assistant.html
+++ b/report-app/src/app/shared/ai-assistant/ai-assistant.html
@@ -6,8 +6,8 @@
-<!-- … (changed markup elided in extraction) -->
+<!-- … (changed markup elided in extraction) -->
   AI Assistant
 <!-- … -->
@@ -29,19 +29,34 @@
 <!-- … -->
   AI Assistant
 <!-- … -->
-  @for (message of messages; track $index) {
-    <!-- … -->
-    <!-- … -->
-    <!-- … -->
-  }
-  @if (isLoading()) {
-    <!-- … -->
-    <!-- … -->
-    <!-- … -->
+  @if (aiConfigState.isLoading()) {
+    <!-- … -->
+  } @else if (aiConfigState.error() !== undefined) {
+    Error fetching available models.
+  } @else if (models().length === 0) {
+    @if (models().length === 0) {
+      No models available. Make sure you set API keys as per
+      Setup instructions.
+    }
+  } @else {
+    @for (message of messages; track $index) {
+      <!-- … -->
+      <!-- … -->
+    }
+    @if (isLoading()) {
+      <!-- … -->
+      <!-- … -->
+    }
   }
diff --git a/report-app/src/app/shared/ai-assistant/ai-assistant.ts b/report-app/src/app/shared/ai-assistant/ai-assistant.ts
index abc3290..6656868 100644
--- a/report-app/src/app/shared/ai-assistant/ai-assistant.ts
+++ b/report-app/src/app/shared/ai-assistant/ai-assistant.ts
@@ -1,19 +1,15 @@
-import {HttpClient} from '@angular/common/http';
-import {Component, inject, input, output, signal} from '@angular/core';
+import {HttpClient, httpResource} from '@angular/common/http';
+import {Component, computed, inject, input, linkedSignal, output, signal} from '@angular/core';
 import {FormsModule} from '@angular/forms';
 import {firstValueFrom} from 'rxjs';
 import {
   AiChatMessage,
   AiChatRequest,
   AiChatResponse,
+  AIConfigState,
 } from '../../../../../runner/shared-interfaces';
 import {MessageSpinner} from '../message-spinner';
 
-interface Model {
-  id: string;
-  name: string;
-}
-
 @Component({
   selector: 'app-ai-assistant',
   templateUrl: './ai-assistant.html',
@@ -34,12 +30,9 @@ export class AiAssistant {
   private readonly http = inject(HttpClient);
 
-  protected readonly models: Model[] = [
-    {id: 'gemini-2.5-flash', name: 'Gemini 2.5 Flash'},
-    {id: 'gemini-2.5-pro', name: 'Gemini 2.5 Pro'},
-    {id: 'gemini-2.5-flash-lite', name: 'Gemini 2.5 Flash Lite'},
-  ];
-  protected selectedModel = this.models[0].id;
+  protected readonly aiConfigState = httpResource<AIConfigState>(() => '/api/ai-config-state');
+  protected readonly models = computed(() => this.aiConfigState.value()?.configuredModels ?? []);
+  protected selectedModel = linkedSignal(() => this.models()[0]);
 
   protected toggleExpanded(): void {
     this.isExpanded.set(!this.isExpanded());
@@ -60,7 +53,7 @@ export class AiAssistant {
     const payload: AiChatRequest = {
       prompt,
       pastMessages,
-      model: this.selectedModel,
+      model: this.selectedModel(),
     };
 
     try {
diff --git a/runner/codegen/genkit/genkit-runner.ts b/runner/codegen/genkit/genkit-runner.ts
index b4e1887..e4b6455 100644
--- a/runner/codegen/genkit/genkit-runner.ts
+++ b/runner/codegen/genkit/genkit-runner.ts
@@ -96,6 +96,10 @@ export class GenkitRunner implements LlmRunner {
     return MODEL_PROVIDERS.flatMap(p => p.getSupportedModels());
   }
 
+  getSupportedModelsWithAPIKey(): string[] {
+    return MODEL_PROVIDERS.filter(p => p.getApiKey() !== null).flatMap(p => p.getSupportedModels());
+  }
+
   private async _genkitRequest(
     provider: GenkitModelProvider,
     model: ModelReference,
diff --git a/runner/shared-interfaces.ts b/runner/shared-interfaces.ts
index 7d61859..a43cb9a 100644
--- a/runner/shared-interfaces.ts
+++ b/runner/shared-interfaces.ts
@@ -323,6 +323,11 @@ export interface AiChatMessage {
   text: string;
 }
 
+/** Interface describing the response of the AI config state report-server endpoint. */
+export interface AIConfigState {
+  configuredModels: string[];
+}
+
 /**
  * A summary of build outcomes and code quality scores for an entire assessment run.
  */