diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts index 5b1c66f995..4e2dda3708 100644 --- a/src/core/webview/webviewMessageHandler.ts +++ b/src/core/webview/webviewMessageHandler.ts @@ -5,6 +5,7 @@ import * as fs from "fs/promises" import pWaitFor from "p-wait-for" import * as vscode from "vscode" import * as yaml from "yaml" +import { serializeError } from "serialize-error" import { type Language, type ProviderSettings, type GlobalState, TelemetryEventName } from "@roo-code/types" import { CloudService } from "@roo-code/cloud" @@ -1807,6 +1808,43 @@ export const webviewMessageHandler = async ( break } case "codebaseIndexConfig": { + // Handle test action separately + if (message.action === "test") { + try { + if (!provider.codeIndexManager) { + throw new Error("Code index manager not available") + } + + // Get the service factory from the manager + const serviceFactory = provider.codeIndexManager.getServiceFactory() + if (!serviceFactory) { + throw new Error("Service factory not available") + } + + // Test the configuration with the UI values + const isValid = await serviceFactory.validateEmbedderConfig(message.values) + + // Send test result back to webview + provider.postMessageToWebview({ + type: "codebaseIndexTestResult", + success: isValid, + message: isValid ? "Configuration is valid" : "Configuration test failed", + }) + } catch (error) { + const serializedError = serializeError(error) + provider.log(`[CodeIndexManager] Configuration test error: ${serializedError.message}`) + + // Send error result back to webview with serialized error + provider.postMessageToWebview({ + type: "codebaseIndexTestResult", + success: false, + message: serializedError.message || "Configuration test failed", + }) + } + break + } + + // Normal configuration update flow const codebaseIndexConfig = message.values ?? 
{ codebaseIndexEnabled: false, codebaseIndexQdrantUrl: "http://localhost:6333", @@ -1823,16 +1861,45 @@ export const webviewMessageHandler = async ( // If now configured and enabled, start indexing automatically if (provider.codeIndexManager.isFeatureEnabled && provider.codeIndexManager.isFeatureConfigured) { if (!provider.codeIndexManager.isInitialized) { - await provider.codeIndexManager.initialize(provider.contextProxy) + try { + await provider.codeIndexManager.initialize(provider.contextProxy) + } catch (initError) { + // Initialization failed - send error status to webview + const serializedError = serializeError(initError) + provider.log(`[CodeIndexManager] Initialization error: ${serializedError.message}`) + + // Send error status update to webview with serialized error details + const status = provider.codeIndexManager.getCurrentStatus() + provider.postMessageToWebview({ + type: "indexingStatusUpdate", + values: { + ...status, + errorDetails: serializedError, + }, + }) + + // Re-throw to prevent indexing attempt + throw initError + } } // Start indexing in background (no await) provider.codeIndexManager.startIndexing() } } } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error) provider.log( - `[CodeIndexManager] Error during background CodeIndexManager configuration/indexing: ${error.message || error}`, + `[CodeIndexManager] Error during background CodeIndexManager configuration/indexing: ${errorMessage}`, ) + + // Send error notification to webview if manager exists + if (provider.codeIndexManager) { + const status = provider.codeIndexManager.getCurrentStatus() + provider.postMessageToWebview({ + type: "indexingStatusUpdate", + values: status, + }) + } } await provider.postStateToWebview() diff --git a/src/i18n/locales/ca/embeddings.json b/src/i18n/locales/ca/embeddings.json index 3302ff7acd..17649bdf5d 100644 --- a/src/i18n/locales/ca/embeddings.json +++ b/src/i18n/locales/ca/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "No s'han pogut crear les incrustacions després de {{attempts}} intents: {{errorMessage}}", "failedMaxAttempts": "No s'han pogut crear les incrustacions després de {{attempts}} intents", "textExceedsTokenLimit": "El text a l'índex {{index}} supera el límit màxim de testimonis ({{itemTokens}} > {{maxTokens}}). S'està ometent.", + "textWithPrefixExceedsTokenLimit": "El text a l'índex {{index}} amb prefix supera el límit màxim de testimonis ({{estimatedTokens}} > {{maxTokens}}). S'utilitza el text original sense prefix.", "rateLimitRetry": "S'ha assolit el límit de velocitat, es torna a intentar en {{delayMs}}ms (intent {{attempt}}/{{maxRetries}})", + "genericError": "Error de {{provider}}: {{errorDetails}}", + "validation": { + "modelNotFound": "No s'ha trobat el model '{{modelId}}'. Models disponibles: {{availableModels}}", + "invalidApiKey": "Clau API no vàlida. Comprova la teva clau API de {{provider}}.", + "rateLimitExceeded": "S'ha superat el límit de velocitat. Torna-ho a provar més tard.", + "networkError": "Error de xarxa. 
Comprova la teva connexió a Internet.", + "configurationFailed": "Ha fallat la validació de la configuració de {{provider}}", + "baseUrlRequired": "Es requereix l'URL base per a l'embedder {{provider}}", + "apiKeyRequired": "Es requereix la clau API per a l'embedder {{provider}}", + "endpointNotFound": "Punt final no trobat: {{baseUrl}}", + "cannotConnect": "No es pot connectar a {{provider}} a {{baseUrl}}. Assegura't que {{provider}} s'està executant.", + "apiNotFound": "API de {{provider}} no trobada a {{baseUrl}}. S'està executant {{provider}}?", + "connectionFailed": "Ha fallat la connexió a {{provider}}: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "No s'ha pogut llegir el cos de l'error", "requestFailed": "La sol·licitud de l'API d'Ollama ha fallat amb l'estat {{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/de/embeddings.json b/src/i18n/locales/de/embeddings.json index 300899fd1b..43ef8063ce 100644 --- a/src/i18n/locales/de/embeddings.json +++ b/src/i18n/locales/de/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "Erstellung von Einbettungen nach {{attempts}} Versuchen fehlgeschlagen: {{errorMessage}}", "failedMaxAttempts": "Erstellung von Einbettungen nach {{attempts}} Versuchen fehlgeschlagen", "textExceedsTokenLimit": "Text bei Index {{index}} überschreitet das maximale Token-Limit ({{itemTokens}} > {{maxTokens}}). Wird übersprungen.", + "textWithPrefixExceedsTokenLimit": "Text bei Index {{index}} mit Präfix überschreitet das maximale Token-Limit ({{estimatedTokens}} > {{maxTokens}}). Verwende Originaltext ohne Präfix.", "rateLimitRetry": "Ratenlimit erreicht, Wiederholung in {{delayMs}}ms (Versuch {{attempt}}/{{maxRetries}})", + "genericError": "{{provider}}-Fehler: {{errorDetails}}", + "validation": { + "modelNotFound": "Modell '{{modelId}}' nicht gefunden. Verfügbare Modelle: {{availableModels}}", + "invalidApiKey": "Ungültiger API-Schlüssel. 
Überprüfe deinen {{provider}} API-Schlüssel.", + "rateLimitExceeded": "Ratenlimit überschritten. Versuche es später erneut.", + "networkError": "Netzwerkfehler. Überprüfe deine Internetverbindung.", + "configurationFailed": "{{provider}}-Konfigurationsvalidierung fehlgeschlagen", + "baseUrlRequired": "Basis-URL für {{provider}}-Embedder erforderlich", + "apiKeyRequired": "API-Schlüssel für {{provider}}-Embedder erforderlich", + "endpointNotFound": "Endpunkt nicht gefunden: {{baseUrl}}", + "cannotConnect": "Kann keine Verbindung zu {{provider}} unter {{baseUrl}} herstellen. Stelle sicher, dass {{provider}} läuft.", + "apiNotFound": "{{provider}} API nicht gefunden unter {{baseUrl}}. Läuft {{provider}}?", + "connectionFailed": "Verbindung zu {{provider}} fehlgeschlagen: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "Fehlerinhalt konnte nicht gelesen werden", "requestFailed": "Ollama API-Anfrage fehlgeschlagen mit Status {{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/en/embeddings.json b/src/i18n/locales/en/embeddings.json index e57f3de0e8..7d2d8ff5de 100644 --- a/src/i18n/locales/en/embeddings.json +++ b/src/i18n/locales/en/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "Failed to create embeddings after {{attempts}} attempts: {{errorMessage}}", "failedMaxAttempts": "Failed to create embeddings after {{attempts}} attempts", "textExceedsTokenLimit": "Text at index {{index}} exceeds maximum token limit ({{itemTokens}} > {{maxTokens}}). Skipping.", + "textWithPrefixExceedsTokenLimit": "Text at index {{index}} with prefix exceeds maximum token limit ({{estimatedTokens}} > {{maxTokens}}). Using original text without prefix.", "rateLimitRetry": "Rate limit hit, retrying in {{delayMs}}ms (attempt {{attempt}}/{{maxRetries}})", + "genericError": "{{provider}} error: {{errorDetails}}", + "validation": { + "modelNotFound": "Model '{{modelId}}' not found. 
Available models: {{availableModels}}", + "invalidApiKey": "Invalid API key. Please check your {{provider}} API key.", + "rateLimitExceeded": "Rate limit exceeded. Please try again later.", + "networkError": "Network error. Please check your internet connection.", + "configurationFailed": "Failed to validate {{provider}} configuration", + "baseUrlRequired": "Base URL is required for {{provider}} embedder", + "apiKeyRequired": "API key is required for {{provider}} embedder", + "endpointNotFound": "Endpoint not found: {{baseUrl}}", + "cannotConnect": "Cannot connect to {{provider}} at {{baseUrl}}. Please ensure {{provider}} is running.", + "apiNotFound": "{{provider}} API not found at {{baseUrl}}. Is {{provider}} running?", + "connectionFailed": "Failed to connect to {{provider}}: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "Could not read error body", "requestFailed": "Ollama API request failed with status {{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/es/embeddings.json b/src/i18n/locales/es/embeddings.json index c2d7795362..c4e91a8d38 100644 --- a/src/i18n/locales/es/embeddings.json +++ b/src/i18n/locales/es/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "No se pudieron crear las incrustaciones después de {{attempts}} intentos: {{errorMessage}}", "failedMaxAttempts": "No se pudieron crear las incrustaciones después de {{attempts}} intentos", "textExceedsTokenLimit": "El texto en el índice {{index}} supera el límite máximo de tokens ({{itemTokens}} > {{maxTokens}}). Omitiendo.", + "textWithPrefixExceedsTokenLimit": "El texto en el índice {{index}} con prefijo supera el límite máximo de tokens ({{estimatedTokens}} > {{maxTokens}}). 
Usando texto original sin prefijo.", "rateLimitRetry": "Límite de velocidad alcanzado, reintentando en {{delayMs}}ms (intento {{attempt}}/{{maxRetries}})", + "genericError": "Error de {{provider}}: {{errorDetails}}", + "validation": { + "modelNotFound": "Modelo '{{modelId}}' no encontrado. Modelos disponibles: {{availableModels}}", + "invalidApiKey": "Clave API inválida. Verifica tu clave API de {{provider}}.", + "rateLimitExceeded": "Límite de velocidad excedido. Intenta de nuevo más tarde.", + "networkError": "Error de red. Verifica tu conexión a Internet.", + "configurationFailed": "Falló la validación de configuración de {{provider}}", + "baseUrlRequired": "Se requiere URL base para el embedder {{provider}}", + "apiKeyRequired": "Se requiere clave API para el embedder {{provider}}", + "endpointNotFound": "Punto final no encontrado: {{baseUrl}}", + "cannotConnect": "No se puede conectar a {{provider}} en {{baseUrl}}. Asegúrate de que {{provider}} esté ejecutándose.", + "apiNotFound": "API de {{provider}} no encontrada en {{baseUrl}}. ¿Está ejecutándose {{provider}}?", + "connectionFailed": "Falló la conexión a {{provider}}: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "No se pudo leer el cuerpo del error", "requestFailed": "La solicitud de la API de Ollama falló con estado {{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/fr/embeddings.json b/src/i18n/locales/fr/embeddings.json index 4dbbe6218b..1f0ab2ea0a 100644 --- a/src/i18n/locales/fr/embeddings.json +++ b/src/i18n/locales/fr/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "Échec de la création des embeddings après {{attempts}} tentatives : {{errorMessage}}", "failedMaxAttempts": "Échec de la création des embeddings après {{attempts}} tentatives", "textExceedsTokenLimit": "Le texte à l'index {{index}} dépasse la limite maximale de tokens ({{itemTokens}} > {{maxTokens}}). 
Ignoré.", + "textWithPrefixExceedsTokenLimit": "Le texte à l'index {{index}} avec préfixe dépasse la limite maximale de tokens ({{estimatedTokens}} > {{maxTokens}}). Utilisation du texte original sans préfixe.", "rateLimitRetry": "Limite de débit atteinte, nouvelle tentative dans {{delayMs}}ms (tentative {{attempt}}/{{maxRetries}})", + "genericError": "Erreur {{provider}} : {{errorDetails}}", + "validation": { + "modelNotFound": "Modèle '{{modelId}}' introuvable. Modèles disponibles : {{availableModels}}", + "invalidApiKey": "Clé API invalide. Vérifie ta clé API {{provider}}.", + "rateLimitExceeded": "Limite de débit dépassée. Réessaye plus tard.", + "networkError": "Erreur réseau. Vérifie ta connexion Internet.", + "configurationFailed": "Échec de la validation de la configuration {{provider}}", + "baseUrlRequired": "URL de base requise pour l'embedder {{provider}}", + "apiKeyRequired": "Clé API requise pour l'embedder {{provider}}", + "endpointNotFound": "Point de terminaison introuvable : {{baseUrl}}", + "cannotConnect": "Impossible de se connecter à {{provider}} à {{baseUrl}}. Assure-toi que {{provider}} est en cours d'exécution.", + "apiNotFound": "API {{provider}} introuvable à {{baseUrl}}. 
{{provider}} est-il en cours d'exécution ?", + "connectionFailed": "Échec de la connexion à {{provider}} : {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "Impossible de lire le corps de l'erreur", "requestFailed": "Échec de la requête API Ollama avec le statut {{status}} {{statusText}} : {{errorBody}}", diff --git a/src/i18n/locales/hi/embeddings.json b/src/i18n/locales/hi/embeddings.json index 312d42e69c..25ece3c27a 100644 --- a/src/i18n/locales/hi/embeddings.json +++ b/src/i18n/locales/hi/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "{{attempts}} प्रयासों के बाद एम्बेडिंग बनाने में विफल: {{errorMessage}}", "failedMaxAttempts": "{{attempts}} प्रयासों के बाद एम्बेडिंग बनाने में विफल", "textExceedsTokenLimit": "अनुक्रमणिका {{index}} पर पाठ अधिकतम टोकन सीमा ({{itemTokens}} > {{maxTokens}}) से अधिक है। छोड़ा जा रहा है।", + "textWithPrefixExceedsTokenLimit": "उपसर्ग के साथ अनुक्रमणिका {{index}} पर पाठ अधिकतम टोकन सीमा ({{estimatedTokens}} > {{maxTokens}}) से अधिक है। उपसर्ग के बिना मूल पाठ का उपयोग कर रहे हैं।", "rateLimitRetry": "दर सीमा समाप्त, {{delayMs}}ms में पुन: प्रयास किया जा रहा है (प्रयास {{attempt}}/{{maxRetries}})", + "genericError": "{{provider}} त्रुटि: {{errorDetails}}", + "validation": { + "modelNotFound": "मॉडल '{{modelId}}' नहीं मिला। उपलब्ध मॉडल: {{availableModels}}", + "invalidApiKey": "अमान्य API कुंजी। अपनी {{provider}} API कुंजी की जांच करें।", + "rateLimitExceeded": "दर सीमा पार हो गई। बाद में पुनः प्रयास करें।", + "networkError": "नेटवर्क त्रुटि। अपना इंटरनेट कनेक्शन जांचें।", + "configurationFailed": "{{provider}} कॉन्फ़िगरेशन सत्यापन विफल", + "baseUrlRequired": "{{provider}} एम्बेडर के लिए आधार URL आवश्यक है", + "apiKeyRequired": "{{provider}} एम्बेडर के लिए API कुंजी आवश्यक है", + "endpointNotFound": "एंडपॉइंट नहीं मिला: {{baseUrl}}", + "cannotConnect": "{{baseUrl}} पर {{provider}} से कनेक्ट नहीं हो सकता। सुनिश्चित करें कि {{provider}} चल रहा है।", + "apiNotFound": "{{baseUrl}} पर {{provider}} API नहीं मिला। क्या 
{{provider}} चल रहा है?", + "connectionFailed": "{{provider}} से कनेक्शन विफल: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "त्रुटि सामग्री पढ़ नहीं सका", "requestFailed": "Ollama API अनुरोध स्थिति {{status}} {{statusText}} के साथ विफल: {{errorBody}}", diff --git a/src/i18n/locales/id/embeddings.json b/src/i18n/locales/id/embeddings.json index abfa9cb354..0b1d6022ca 100644 --- a/src/i18n/locales/id/embeddings.json +++ b/src/i18n/locales/id/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "Gagal membuat embeddings setelah {{attempts}} percobaan: {{errorMessage}}", "failedMaxAttempts": "Gagal membuat embeddings setelah {{attempts}} percobaan", "textExceedsTokenLimit": "Teks pada indeks {{index}} melebihi batas maksimum token ({{itemTokens}} > {{maxTokens}}). Dilewati.", + "textWithPrefixExceedsTokenLimit": "Teks pada indeks {{index}} dengan prefix melebihi batas maksimum token ({{estimatedTokens}} > {{maxTokens}}). Menggunakan teks asli tanpa prefix.", "rateLimitRetry": "Batas rate tercapai, mencoba lagi dalam {{delayMs}}ms (percobaan {{attempt}}/{{maxRetries}})", + "genericError": "Error {{provider}}: {{errorDetails}}", + "validation": { + "modelNotFound": "Model '{{modelId}}' tidak ditemukan. Model yang tersedia: {{availableModels}}", + "invalidApiKey": "Kunci API tidak valid. Periksa kunci API {{provider}} kamu.", + "rateLimitExceeded": "Batas rate terlampaui. Coba lagi nanti.", + "networkError": "Error jaringan. Periksa koneksi internet kamu.", + "configurationFailed": "Validasi konfigurasi {{provider}} gagal", + "baseUrlRequired": "URL dasar diperlukan untuk embedder {{provider}}", + "apiKeyRequired": "Kunci API diperlukan untuk embedder {{provider}}", + "endpointNotFound": "Endpoint tidak ditemukan: {{baseUrl}}", + "cannotConnect": "Tidak dapat terhubung ke {{provider}} di {{baseUrl}}. Pastikan {{provider}} sedang berjalan.", + "apiNotFound": "API {{provider}} tidak ditemukan di {{baseUrl}}. 
Apakah {{provider}} sedang berjalan?", + "connectionFailed": "Koneksi ke {{provider}} gagal: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "Tidak dapat membaca body error", "requestFailed": "Permintaan API Ollama gagal dengan status {{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/it/embeddings.json b/src/i18n/locales/it/embeddings.json index 5bd7164886..4e42e13a81 100644 --- a/src/i18n/locales/it/embeddings.json +++ b/src/i18n/locales/it/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "Creazione degli embedding non riuscita dopo {{attempts}} tentativi: {{errorMessage}}", "failedMaxAttempts": "Creazione degli embedding non riuscita dopo {{attempts}} tentativi", "textExceedsTokenLimit": "Il testo all'indice {{index}} supera il limite massimo di token ({{itemTokens}} > {{maxTokens}}). Saltato.", + "textWithPrefixExceedsTokenLimit": "Il testo all'indice {{index}} con prefisso supera il limite massimo di token ({{estimatedTokens}} > {{maxTokens}}). Utilizzo del testo originale senza prefisso.", "rateLimitRetry": "Limite di velocità raggiunto, nuovo tentativo tra {{delayMs}}ms (tentativo {{attempt}}/{{maxRetries}})", + "genericError": "Errore {{provider}}: {{errorDetails}}", + "validation": { + "modelNotFound": "Modello '{{modelId}}' non trovato. Modelli disponibili: {{availableModels}}", + "invalidApiKey": "Chiave API non valida. Controlla la tua chiave API {{provider}}.", + "rateLimitExceeded": "Limite di velocità superato. Riprova più tardi.", + "networkError": "Errore di rete. Controlla la tua connessione Internet.", + "configurationFailed": "Validazione della configurazione {{provider}} fallita", + "baseUrlRequired": "URL di base richiesto per l'embedder {{provider}}", + "apiKeyRequired": "Chiave API richiesta per l'embedder {{provider}}", + "endpointNotFound": "Endpoint non trovato: {{baseUrl}}", + "cannotConnect": "Impossibile connettersi a {{provider}} su {{baseUrl}}. 
Assicurati che {{provider}} sia in esecuzione.", + "apiNotFound": "API {{provider}} non trovata su {{baseUrl}}. {{provider}} è in esecuzione?", + "connectionFailed": "Connessione a {{provider}} fallita: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "Impossibile leggere il corpo dell'errore", "requestFailed": "Richiesta API Ollama fallita con stato {{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/ja/embeddings.json b/src/i18n/locales/ja/embeddings.json index 862270a364..bd6a4ed909 100644 --- a/src/i18n/locales/ja/embeddings.json +++ b/src/i18n/locales/ja/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "{{attempts}}回試行しましたが、埋め込みの作成に失敗しました:{{errorMessage}}", "failedMaxAttempts": "{{attempts}}回試行しましたが、埋め込みの作成に失敗しました", "textExceedsTokenLimit": "インデックス{{index}}のテキストが最大トークン制限を超えています({{itemTokens}}> {{maxTokens}})。スキップします。", + "textWithPrefixExceedsTokenLimit": "プレフィックス付きのインデックス{{index}}のテキストが最大トークン制限を超えています({{estimatedTokens}} > {{maxTokens}})。プレフィックスなしの元のテキストを使用します。", "rateLimitRetry": "レート制限に達しました。{{delayMs}}ミリ秒後に再試行します(試行{{attempt}}/{{maxRetries}})", + "genericError": "{{provider}}エラー: {{errorDetails}}", + "validation": { + "modelNotFound": "モデル '{{modelId}}' が見つかりません。利用可能なモデル: {{availableModels}}", + "invalidApiKey": "無効なAPIキー。{{provider}}のAPIキーを確認してください。", + "rateLimitExceeded": "レート制限を超えました。後でもう一度お試しください。", + "networkError": "ネットワークエラー。インターネット接続を確認してください。", + "configurationFailed": "{{provider}}の設定検証に失敗しました", + "baseUrlRequired": "{{provider}}エンベッダーにはベースURLが必要です", + "apiKeyRequired": "{{provider}}エンベッダーにはAPIキーが必要です", + "endpointNotFound": "エンドポイントが見つかりません: {{baseUrl}}", + "cannotConnect": "{{baseUrl}}の{{provider}}に接続できません。{{provider}}が実行されていることを確認してください。", + "apiNotFound": "{{baseUrl}}で{{provider}} APIが見つかりません。{{provider}}は実行されていますか?", + "connectionFailed": "{{provider}}への接続に失敗しました: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "エラー本文を読み取れませんでした", "requestFailed": "Ollama APIリクエストが失敗しました。ステータス 
{{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/ko/embeddings.json b/src/i18n/locales/ko/embeddings.json index 37877bfa97..e33c7bc20f 100644 --- a/src/i18n/locales/ko/embeddings.json +++ b/src/i18n/locales/ko/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "{{attempts}}번 시도 후 임베딩 생성 실패: {{errorMessage}}", "failedMaxAttempts": "{{attempts}}번 시도 후 임베딩 생성 실패", "textExceedsTokenLimit": "인덱스 {{index}}의 텍스트가 최대 토큰 제한({{itemTokens}} > {{maxTokens}})을 초과했습니다. 건너뜁니다.", + "textWithPrefixExceedsTokenLimit": "접두사가 있는 인덱스 {{index}}의 텍스트가 최대 토큰 제한({{estimatedTokens}} > {{maxTokens}})을 초과했습니다. 접두사 없이 원본 텍스트를 사용합니다.", "rateLimitRetry": "속도 제한에 도달했습니다. {{delayMs}}ms 후에 다시 시도합니다(시도 {{attempt}}/{{maxRetries}}).", + "genericError": "{{provider}} 오류: {{errorDetails}}", + "validation": { + "modelNotFound": "'{{modelId}}' 모델을 찾을 수 없습니다. 사용 가능한 모델: {{availableModels}}", + "invalidApiKey": "잘못된 API 키입니다. {{provider}} API 키를 확인하세요.", + "rateLimitExceeded": "속도 제한을 초과했습니다. 나중에 다시 시도하세요.", + "networkError": "네트워크 오류입니다. 인터넷 연결을 확인하세요.", + "configurationFailed": "{{provider}} 구성 검증 실패", + "baseUrlRequired": "{{provider}} 임베더에 기본 URL이 필요합니다", + "apiKeyRequired": "{{provider}} 임베더에 API 키가 필요합니다", + "endpointNotFound": "엔드포인트를 찾을 수 없습니다: {{baseUrl}}", + "cannotConnect": "{{baseUrl}}의 {{provider}}에 연결할 수 없습니다. {{provider}}가 실행 중인지 확인하세요.", + "apiNotFound": "{{baseUrl}}에서 {{provider}} API를 찾을 수 없습니다. {{provider}}가 실행 중인가요?", + "connectionFailed": "{{provider}} 연결 실패: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "오류 본문을 읽을 수 없습니다", "requestFailed": "Ollama API 요청이 실패했습니다. 
상태 {{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/nl/embeddings.json b/src/i18n/locales/nl/embeddings.json index 7256b0973b..2739501df2 100644 --- a/src/i18n/locales/nl/embeddings.json +++ b/src/i18n/locales/nl/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "Insluitingen maken mislukt na {{attempts}} pogingen: {{errorMessage}}", "failedMaxAttempts": "Insluitingen maken mislukt na {{attempts}} pogingen", "textExceedsTokenLimit": "Tekst op index {{index}} overschrijdt de maximale tokenlimiet ({{itemTokens}} > {{maxTokens}}). Wordt overgeslagen.", + "textWithPrefixExceedsTokenLimit": "Tekst op index {{index}} met prefix overschrijdt de maximale tokenlimiet ({{estimatedTokens}} > {{maxTokens}}). Originele tekst zonder prefix wordt gebruikt.", "rateLimitRetry": "Snelheidslimiet bereikt, opnieuw proberen over {{delayMs}}ms (poging {{attempt}}/{{maxRetries}})", + "genericError": "{{provider}}-fout: {{errorDetails}}", + "validation": { + "modelNotFound": "Model '{{modelId}}' niet gevonden. Beschikbare modellen: {{availableModels}}", + "invalidApiKey": "Ongeldige API-sleutel. Controleer je {{provider}} API-sleutel.", + "rateLimitExceeded": "Snelheidslimiet overschreden. Probeer het later opnieuw.", + "networkError": "Netwerkfout. Controleer je internetverbinding.", + "configurationFailed": "{{provider}}-configuratievalidatie mislukt", + "baseUrlRequired": "Basis-URL vereist voor {{provider}}-embedder", + "apiKeyRequired": "API-sleutel vereist voor {{provider}}-embedder", + "endpointNotFound": "Eindpunt niet gevonden: {{baseUrl}}", + "cannotConnect": "Kan geen verbinding maken met {{provider}} op {{baseUrl}}. Zorg ervoor dat {{provider}} actief is.", + "apiNotFound": "{{provider}} API niet gevonden op {{baseUrl}}. 
Is {{provider}} actief?", + "connectionFailed": "Verbinding met {{provider}} mislukt: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "Kon foutinhoud niet lezen", "requestFailed": "Ollama API-verzoek mislukt met status {{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/pl/embeddings.json b/src/i18n/locales/pl/embeddings.json index c3e160869b..97c6f2b274 100644 --- a/src/i18n/locales/pl/embeddings.json +++ b/src/i18n/locales/pl/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "Nie udało się utworzyć osadzeń po {{attempts}} próbach: {{errorMessage}}", "failedMaxAttempts": "Nie udało się utworzyć osadzeń po {{attempts}} próbach", "textExceedsTokenLimit": "Tekst w indeksie {{index}} przekracza maksymalny limit tokenów ({{itemTokens}} > {{maxTokens}}). Pomijanie.", + "textWithPrefixExceedsTokenLimit": "Tekst w indeksie {{index}} z prefiksem przekracza maksymalny limit tokenów ({{estimatedTokens}} > {{maxTokens}}). Używanie oryginalnego tekstu bez prefiksu.", "rateLimitRetry": "Osiągnięto limit szybkości, ponawianie za {{delayMs}}ms (próba {{attempt}}/{{maxRetries}})", + "genericError": "Błąd {{provider}}: {{errorDetails}}", + "validation": { + "modelNotFound": "Nie znaleziono modelu '{{modelId}}'. Dostępne modele: {{availableModels}}", + "invalidApiKey": "Nieprawidłowy klucz API. Sprawdź swój klucz API {{provider}}.", + "rateLimitExceeded": "Przekroczono limit szybkości. Spróbuj ponownie później.", + "networkError": "Błąd sieci. Sprawdź połączenie internetowe.", + "configurationFailed": "Walidacja konfiguracji {{provider}} nie powiodła się", + "baseUrlRequired": "Wymagany jest bazowy URL dla embeddera {{provider}}", + "apiKeyRequired": "Wymagany jest klucz API dla embeddera {{provider}}", + "endpointNotFound": "Nie znaleziono punktu końcowego: {{baseUrl}}", + "cannotConnect": "Nie można połączyć się z {{provider}} pod adresem {{baseUrl}}. 
Upewnij się, że {{provider}} jest uruchomiony.", + "apiNotFound": "Nie znaleziono API {{provider}} pod adresem {{baseUrl}}. Czy {{provider}} jest uruchomiony?", + "connectionFailed": "Połączenie z {{provider}} nie powiodło się: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "Nie można odczytać treści błędu", "requestFailed": "Żądanie API Ollama nie powiodło się ze statusem {{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/pt-BR/embeddings.json b/src/i18n/locales/pt-BR/embeddings.json index 6b97475265..a90393b032 100644 --- a/src/i18n/locales/pt-BR/embeddings.json +++ b/src/i18n/locales/pt-BR/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "Falha ao criar embeddings após {{attempts}} tentativas: {{errorMessage}}", "failedMaxAttempts": "Falha ao criar embeddings após {{attempts}} tentativas", "textExceedsTokenLimit": "O texto no índice {{index}} excede o limite máximo de tokens ({{itemTokens}} > {{maxTokens}}). Ignorando.", + "textWithPrefixExceedsTokenLimit": "O texto no índice {{index}} com prefixo excede o limite máximo de tokens ({{estimatedTokens}} > {{maxTokens}}). Usando texto original sem prefixo.", "rateLimitRetry": "Limite de taxa atingido, tentando novamente em {{delayMs}}ms (tentativa {{attempt}}/{{maxRetries}})", + "genericError": "Erro do {{provider}}: {{errorDetails}}", + "validation": { + "modelNotFound": "Modelo '{{modelId}}' não encontrado. Modelos disponíveis: {{availableModels}}", + "invalidApiKey": "Chave API inválida. Verifique sua chave API do {{provider}}.", + "rateLimitExceeded": "Limite de taxa excedido. Tente novamente mais tarde.", + "networkError": "Erro de rede. 
Verifique sua conexão com a Internet.", + "configurationFailed": "Falha na validação da configuração do {{provider}}", + "baseUrlRequired": "URL base necessária para o embedder {{provider}}", + "apiKeyRequired": "Chave API necessária para o embedder {{provider}}", + "endpointNotFound": "Endpoint não encontrado: {{baseUrl}}", + "cannotConnect": "Não é possível conectar ao {{provider}} em {{baseUrl}}. Certifique-se de que o {{provider}} está em execução.", + "apiNotFound": "API do {{provider}} não encontrada em {{baseUrl}}. O {{provider}} está em execução?", + "connectionFailed": "Falha na conexão com {{provider}}: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "Não foi possível ler o corpo do erro", "requestFailed": "Solicitação da API Ollama falhou com status {{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/ru/embeddings.json b/src/i18n/locales/ru/embeddings.json index c6143816e8..612d1e3a48 100644 --- a/src/i18n/locales/ru/embeddings.json +++ b/src/i18n/locales/ru/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "Не удалось создать вложения после {{attempts}} попыток: {{errorMessage}}", "failedMaxAttempts": "Не удалось создать вложения после {{attempts}} попыток", "textExceedsTokenLimit": "Текст в индексе {{index}} превышает максимальный лимит токенов ({{itemTokens}} > {{maxTokens}}). Пропускается.", + "textWithPrefixExceedsTokenLimit": "Текст в индексе {{index}} с префиксом превышает максимальный лимит токенов ({{estimatedTokens}} > {{maxTokens}}). Используется оригинальный текст без префикса.", "rateLimitRetry": "Достигнут лимит скорости, повторная попытка через {{delayMs}} мс (попытка {{attempt}}/{{maxRetries}})", + "genericError": "Ошибка {{provider}}: {{errorDetails}}", + "validation": { + "modelNotFound": "Модель '{{modelId}}' не найдена. Доступные модели: {{availableModels}}", + "invalidApiKey": "Недействительный API-ключ. Проверь свой API-ключ {{provider}}.", + "rateLimitExceeded": "Превышен лимит скорости. 
Попробуй позже.", + "networkError": "Ошибка сети. Проверь подключение к интернету.", + "configurationFailed": "Ошибка проверки конфигурации {{provider}}", + "baseUrlRequired": "Требуется базовый URL для эмбеддера {{provider}}", + "apiKeyRequired": "Требуется API-ключ для эмбеддера {{provider}}", + "endpointNotFound": "Конечная точка не найдена: {{baseUrl}}", + "cannotConnect": "Не удается подключиться к {{provider}} по адресу {{baseUrl}}. Убедись, что {{provider}} запущен.", + "apiNotFound": "API {{provider}} не найден по адресу {{baseUrl}}. {{provider}} запущен?", + "connectionFailed": "Не удалось подключиться к {{provider}}: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "Не удалось прочитать тело ошибки", "requestFailed": "Запрос к API Ollama не удался со статусом {{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/tr/embeddings.json b/src/i18n/locales/tr/embeddings.json index 10ad965f0f..8dad9ac29c 100644 --- a/src/i18n/locales/tr/embeddings.json +++ b/src/i18n/locales/tr/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "{{attempts}} denemeden sonra gömülmeler oluşturulamadı: {{errorMessage}}", "failedMaxAttempts": "{{attempts}} denemeden sonra gömülmeler oluşturulamadı", "textExceedsTokenLimit": "{{index}} dizinindeki metin maksimum jeton sınırını aşıyor ({{itemTokens}} > {{maxTokens}}). Atlanıyor.", + "textWithPrefixExceedsTokenLimit": "Ön ekli {{index}} dizinindeki metin maksimum jeton sınırını aşıyor ({{estimatedTokens}} > {{maxTokens}}). Ön eksiz orijinal metin kullanılıyor.", "rateLimitRetry": "Hız sınırına ulaşıldı, {{delayMs}}ms içinde yeniden deneniyor (deneme {{attempt}}/{{maxRetries}})", + "genericError": "{{provider}} hatası: {{errorDetails}}", + "validation": { + "modelNotFound": "'{{modelId}}' modeli bulunamadı. Mevcut modeller: {{availableModels}}", + "invalidApiKey": "Geçersiz API anahtarı. {{provider}} API anahtarını kontrol et.", + "rateLimitExceeded": "Hız sınırı aşıldı. 
Daha sonra tekrar dene.", + "networkError": "Ağ hatası. İnternet bağlantını kontrol et.", + "configurationFailed": "{{provider}} yapılandırma doğrulaması başarısız", + "baseUrlRequired": "{{provider}} gömücü için temel URL gerekli", + "apiKeyRequired": "{{provider}} gömücü için API anahtarı gerekli", + "endpointNotFound": "Uç nokta bulunamadı: {{baseUrl}}", + "cannotConnect": "{{baseUrl}} adresindeki {{provider}}'a bağlanılamıyor. {{provider}}'ın çalıştığından emin ol.", + "apiNotFound": "{{baseUrl}} adresinde {{provider}} API'si bulunamadı. {{provider}} çalışıyor mu?", + "connectionFailed": "{{provider}} bağlantısı başarısız: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "Hata gövdesi okunamadı", "requestFailed": "Ollama API isteği {{status}} {{statusText}} durumuyla başarısız oldu: {{errorBody}}", diff --git a/src/i18n/locales/vi/embeddings.json b/src/i18n/locales/vi/embeddings.json index a533aaac07..0de6a8771c 100644 --- a/src/i18n/locales/vi/embeddings.json +++ b/src/i18n/locales/vi/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "Không thể tạo nhúng sau {{attempts}} lần thử: {{errorMessage}}", "failedMaxAttempts": "Không thể tạo nhúng sau {{attempts}} lần thử", "textExceedsTokenLimit": "Văn bản tại chỉ mục {{index}} vượt quá giới hạn mã thông báo tối đa ({{itemTokens}} > {{maxTokens}}). Bỏ qua.", + "textWithPrefixExceedsTokenLimit": "Văn bản tại chỉ mục {{index}} với tiền tố vượt quá giới hạn mã thông báo tối đa ({{estimatedTokens}} > {{maxTokens}}). Sử dụng văn bản gốc không có tiền tố.", "rateLimitRetry": "Đã đạt đến giới hạn tốc độ, thử lại sau {{delayMs}}ms (lần thử {{attempt}}/{{maxRetries}})", + "genericError": "Lỗi {{provider}}: {{errorDetails}}", + "validation": { + "modelNotFound": "Không tìm thấy mô hình '{{modelId}}'. Các mô hình khả dụng: {{availableModels}}", + "invalidApiKey": "Khóa API không hợp lệ. Kiểm tra khóa API {{provider}} của bạn.", + "rateLimitExceeded": "Đã vượt quá giới hạn tốc độ. 
Thử lại sau.", + "networkError": "Lỗi mạng. Kiểm tra kết nối Internet của bạn.", + "configurationFailed": "Xác thực cấu hình {{provider}} thất bại", + "baseUrlRequired": "Cần URL cơ sở cho trình nhúng {{provider}}", + "apiKeyRequired": "Cần khóa API cho trình nhúng {{provider}}", + "endpointNotFound": "Không tìm thấy điểm cuối: {{baseUrl}}", + "cannotConnect": "Không thể kết nối với {{provider}} tại {{baseUrl}}. Đảm bảo {{provider}} đang chạy.", + "apiNotFound": "Không tìm thấy API {{provider}} tại {{baseUrl}}. {{provider}} có đang chạy không?", + "connectionFailed": "Kết nối với {{provider}} thất bại: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "Không thể đọc nội dung lỗi", "requestFailed": "Yêu cầu API Ollama thất bại với trạng thái {{status}} {{statusText}}: {{errorBody}}", diff --git a/src/i18n/locales/zh-CN/embeddings.json b/src/i18n/locales/zh-CN/embeddings.json index dba5282844..3afef32f59 100644 --- a/src/i18n/locales/zh-CN/embeddings.json +++ b/src/i18n/locales/zh-CN/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "尝试 {{attempts}} 次后创建嵌入失败:{{errorMessage}}", "failedMaxAttempts": "尝试 {{attempts}} 次后创建嵌入失败", "textExceedsTokenLimit": "索引 {{index}} 处的文本超过最大令牌限制 ({{itemTokens}} > {{maxTokens}})。正在跳过。", + "textWithPrefixExceedsTokenLimit": "带前缀的索引 {{index}} 处的文本超过最大令牌限制 ({{estimatedTokens}} > {{maxTokens}})。使用不带前缀的原始文本。", "rateLimitRetry": "已达到速率限制,将在 {{delayMs}} 毫秒后重试(尝试次数 {{attempt}}/{{maxRetries}})", + "genericError": "{{provider}} 错误: {{errorDetails}}", + "validation": { + "modelNotFound": "未找到模型 '{{modelId}}'。可用模型: {{availableModels}}", + "invalidApiKey": "无效的 API 密钥。请检查你的 {{provider}} API 密钥。", + "rateLimitExceeded": "超出速率限制。请稍后重试。", + "networkError": "网络错误。请检查你的互联网连接。", + "configurationFailed": "{{provider}} 配置验证失败", + "baseUrlRequired": "{{provider}} 嵌入器需要基础 URL", + "apiKeyRequired": "{{provider}} 嵌入器需要 API 密钥", + "endpointNotFound": "未找到端点: {{baseUrl}}", + "cannotConnect": "无法连接到 {{baseUrl}} 的 {{provider}}。请确保 {{provider}} 
正在运行。", + "apiNotFound": "在 {{baseUrl}} 未找到 {{provider}} API。{{provider}} 是否正在运行?", + "connectionFailed": "连接到 {{provider}} 失败: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "无法读取错误内容", "requestFailed": "Ollama API 请求失败,状态码 {{status}} {{statusText}}:{{errorBody}}", diff --git a/src/i18n/locales/zh-TW/embeddings.json b/src/i18n/locales/zh-TW/embeddings.json index 71a5a482f2..7fcb1d5670 100644 --- a/src/i18n/locales/zh-TW/embeddings.json +++ b/src/i18n/locales/zh-TW/embeddings.json @@ -5,7 +5,22 @@ "failedWithError": "嘗試 {{attempts}} 次後建立內嵌失敗:{{errorMessage}}", "failedMaxAttempts": "嘗試 {{attempts}} 次後建立內嵌失敗", "textExceedsTokenLimit": "索引 {{index}} 處的文字超過最大權杖限制 ({{itemTokens}} > {{maxTokens}})。正在略過。", + "textWithPrefixExceedsTokenLimit": "帶前綴的索引 {{index}} 處的文字超過最大權杖限制 ({{estimatedTokens}} > {{maxTokens}})。使用不帶前綴的原始文字。", "rateLimitRetry": "已達到速率限制,將在 {{delayMs}} 毫秒後重試(嘗試次數 {{attempt}}/{{maxRetries}})", + "genericError": "{{provider}} 錯誤: {{errorDetails}}", + "validation": { + "modelNotFound": "找不到模型 '{{modelId}}'。可用模型: {{availableModels}}", + "invalidApiKey": "無效的 API 金鑰。請檢查你的 {{provider}} API 金鑰。", + "rateLimitExceeded": "超出速率限制。請稍後再試。", + "networkError": "網路錯誤。請檢查你的網際網路連線。", + "configurationFailed": "{{provider}} 設定驗證失敗", + "baseUrlRequired": "{{provider}} 嵌入器需要基礎 URL", + "apiKeyRequired": "{{provider}} 嵌入器需要 API 金鑰", + "endpointNotFound": "找不到端點: {{baseUrl}}", + "cannotConnect": "無法連接到 {{baseUrl}} 的 {{provider}}。請確保 {{provider}} 正在執行。", + "apiNotFound": "在 {{baseUrl}} 找不到 {{provider}} API。{{provider}} 是否正在執行?", + "connectionFailed": "連接到 {{provider}} 失敗: {{status}} {{statusText}}" + }, "ollama": { "couldNotReadErrorBody": "無法讀取錯誤內容", "requestFailed": "Ollama API 請求失敗,狀態碼 {{status}} {{statusText}}:{{errorBody}}", diff --git a/src/services/code-index/__tests__/service-factory.spec.ts b/src/services/code-index/__tests__/service-factory.spec.ts index 5e2d878ffb..9177a90f33 100644 --- a/src/services/code-index/__tests__/service-factory.spec.ts +++ 
b/src/services/code-index/__tests__/service-factory.spec.ts @@ -146,7 +146,9 @@ describe("CodeIndexServiceFactory", () => { mockConfigManager.getConfig.mockReturnValue(testConfig as any) // Act & Assert - expect(() => factory.createEmbedder()).toThrow("OpenAI configuration missing for embedder creation") + expect(() => factory.createEmbedder()).toThrow( + "OpenAI API key is required. Please configure it in the settings.", + ) }) it("should throw error when Ollama base URL is missing", () => { @@ -161,7 +163,9 @@ describe("CodeIndexServiceFactory", () => { mockConfigManager.getConfig.mockReturnValue(testConfig as any) // Act & Assert - expect(() => factory.createEmbedder()).toThrow("Ollama configuration missing for embedder creation") + expect(() => factory.createEmbedder()).toThrow( + "Ollama base URL is required. Please configure it in the settings.", + ) }) it("should pass model ID to OpenAI Compatible embedder when using OpenAI Compatible provider", () => { @@ -225,7 +229,7 @@ describe("CodeIndexServiceFactory", () => { // Act & Assert expect(() => factory.createEmbedder()).toThrow( - "OpenAI Compatible configuration missing for embedder creation", + "OpenAI-compatible base URL required. Please configure in the settings.", ) }) @@ -243,7 +247,7 @@ describe("CodeIndexServiceFactory", () => { // Act & Assert expect(() => factory.createEmbedder()).toThrow( - "OpenAI Compatible configuration missing for embedder creation", + "OpenAI-compatible API key required. Please configure in the settings.", ) }) @@ -258,7 +262,7 @@ describe("CodeIndexServiceFactory", () => { // Act & Assert expect(() => factory.createEmbedder()).toThrow( - "OpenAI Compatible configuration missing for embedder creation", + "OpenAI-compatible base URL and API key required. 
Please configure in the settings.", ) }) @@ -580,4 +584,189 @@ describe("CodeIndexServiceFactory", () => { expect(() => factory.createVectorStore()).toThrow("Qdrant URL missing for vector store creation") }) }) + + describe("validateEmbedderConfig", () => { + beforeEach(() => { + vitest.clearAllMocks() + // Mock the static validation methods + MockedOpenAiEmbedder.validateEndpoint = vitest.fn().mockResolvedValue(true) + MockedCodeIndexOllamaEmbedder.validateEndpoint = vitest.fn().mockResolvedValue(true) + MockedOpenAICompatibleEmbedder.validateEndpoint = vitest.fn().mockResolvedValue(true) + }) + + it("should validate OpenAI configuration with provided config", async () => { + // Arrange + const providedConfig = { + embedderProvider: "openai", + modelId: "text-embedding-3-large", + openAiOptions: { + openAiNativeApiKey: "test-api-key", + }, + } + + // Act + const result = await factory.validateEmbedderConfig(providedConfig) + + // Assert + expect(result).toBe(true) + expect(MockedOpenAiEmbedder.validateEndpoint).toHaveBeenCalledWith("test-api-key", "text-embedding-3-large") + }) + + it("should validate Ollama configuration with provided config", async () => { + // Arrange + const providedConfig = { + embedderProvider: "ollama", + modelId: "nomic-embed-text:latest", + ollamaOptions: { + ollamaBaseUrl: "http://localhost:11434", + }, + } + + // Act + const result = await factory.validateEmbedderConfig(providedConfig) + + // Assert + expect(result).toBe(true) + expect(MockedCodeIndexOllamaEmbedder.validateEndpoint).toHaveBeenCalledWith( + "http://localhost:11434", + "nomic-embed-text:latest", + ) + }) + + it("should validate OpenAI-compatible configuration with provided config", async () => { + // Arrange + const providedConfig = { + embedderProvider: "openai-compatible", + modelId: "custom-model", + openAiCompatibleOptions: { + baseUrl: "https://api.example.com/v1", + apiKey: "test-api-key", + }, + } + + // Act + const result = await 
factory.validateEmbedderConfig(providedConfig) + + // Assert + expect(result).toBe(true) + expect(MockedOpenAICompatibleEmbedder.validateEndpoint).toHaveBeenCalledWith( + "https://api.example.com/v1", + "test-api-key", + "custom-model", + ) + }) + + it("should use current config when no config is provided", async () => { + // Arrange + const currentConfig = { + embedderProvider: "openai", + modelId: "text-embedding-3-small", + openAiOptions: { + openAiNativeApiKey: "current-api-key", + }, + } + mockConfigManager.getConfig.mockReturnValue(currentConfig as any) + + // Act + const result = await factory.validateEmbedderConfig() + + // Assert + expect(result).toBe(true) + expect(mockConfigManager.getConfig).toHaveBeenCalled() + expect(MockedOpenAiEmbedder.validateEndpoint).toHaveBeenCalledWith( + "current-api-key", + "text-embedding-3-small", + ) + }) + + it("should throw error for missing OpenAI API key", async () => { + // Arrange + const providedConfig = { + embedderProvider: "openai", + modelId: "text-embedding-3-large", + openAiOptions: { + openAiNativeApiKey: undefined, + }, + } + + // Act & Assert + await expect(factory.validateEmbedderConfig(providedConfig)).rejects.toThrow("OpenAI API key is required") + }) + + it("should throw error for missing Ollama base URL", async () => { + // Arrange + const providedConfig = { + embedderProvider: "ollama", + modelId: "nomic-embed-text:latest", + ollamaOptions: { + ollamaBaseUrl: undefined, + }, + } + + // Act & Assert + await expect(factory.validateEmbedderConfig(providedConfig)).rejects.toThrow("Ollama base URL is required") + }) + + it("should throw error for missing OpenAI-compatible credentials", async () => { + // Arrange + const providedConfig = { + embedderProvider: "openai-compatible", + modelId: "custom-model", + openAiCompatibleOptions: { + baseUrl: undefined, + apiKey: "test-api-key", + }, + } + + // Act & Assert + await expect(factory.validateEmbedderConfig(providedConfig)).rejects.toThrow( + 
"OpenAI-compatible base URL and API key are required", + ) + }) + + it("should throw error for invalid embedder type", async () => { + // Arrange + const providedConfig = { + embedderProvider: "invalid-provider", + modelId: "some-model", + } + + // Act & Assert + await expect(factory.validateEmbedderConfig(providedConfig)).rejects.toThrow( + "Invalid embedder type: invalid-provider", + ) + }) + + it("should propagate validation errors from embedder", async () => { + // Arrange + const providedConfig = { + embedderProvider: "openai", + modelId: "text-embedding-3-large", + openAiOptions: { + openAiNativeApiKey: "invalid-key", + }, + } + MockedOpenAiEmbedder.validateEndpoint = vitest.fn().mockRejectedValue(new Error("Invalid API key")) + + // Act & Assert + await expect(factory.validateEmbedderConfig(providedConfig)).rejects.toThrow("Invalid API key") + }) + + it("should use default model ID when not provided", async () => { + // Arrange + const providedConfig = { + embedderProvider: "openai", + openAiOptions: { + openAiNativeApiKey: "test-api-key", + }, + } + + // Act + const result = await factory.validateEmbedderConfig(providedConfig) + + // Assert + expect(result).toBe(true) + expect(MockedOpenAiEmbedder.validateEndpoint).toHaveBeenCalledWith("test-api-key", undefined) + }) + }) }) diff --git a/src/services/code-index/embedders/ollama.ts b/src/services/code-index/embedders/ollama.ts index 2f212c7745..965d69ce8d 100644 --- a/src/services/code-index/embedders/ollama.ts +++ b/src/services/code-index/embedders/ollama.ts @@ -1,8 +1,9 @@ import { ApiHandlerOptions } from "../../../shared/api" import { EmbedderInfo, EmbeddingResponse, IEmbedder } from "../interfaces" -import { getModelQueryPrefix } from "../../../shared/embeddingModels" +import { getModelQueryPrefix, getDefaultModelId } from "../../../shared/embeddingModels" import { MAX_ITEM_TOKENS } from "../constants" import { t } from "../../../i18n" +import { serializeError } from "serialize-error" /** * Implements 
the IEmbedder interface using a local Ollama instance. @@ -106,4 +107,84 @@ export class CodeIndexOllamaEmbedder implements IEmbedder { name: "ollama", } } + + /** + * Validates the Ollama configuration by attempting to connect to the endpoint. + * @param baseUrl - The base URL of the Ollama instance + * @param modelId - The model ID to check + * @returns A promise that resolves to true if valid, or throws an error with details + */ + static async validateEndpoint(baseUrl: string, modelId: string | undefined): Promise<boolean> { + const effectiveModelId = modelId || getDefaultModelId("ollama") + const url = `${baseUrl}/api/tags` + + try { + const response = await fetch(url, { + method: "GET", + headers: { + "Content-Type": "application/json", + }, + }) + + if (!response.ok) { + if (response.status === 404) { + throw new Error(t("embeddings:validation.apiNotFound", { provider: "Ollama", baseUrl })) + } + throw new Error( + t("embeddings:validation.connectionFailed", { + provider: "Ollama", + status: response.status, + statusText: response.statusText, + }), + ) + } + + const data = await response.json() + const models = data.models || [] + const modelNames = models.map((m: any) => m.name) + + // Check if the specified model exists + if (!modelNames.includes(effectiveModelId)) { + throw new Error( + t("embeddings:validation.modelNotFound", { + modelId: effectiveModelId, + availableModels: modelNames.join(", ") || "none", + }), + ) + } + + return true + } catch (error: any) { + // If it's already a translated error, re-throw it + if ( + error?.message?.includes( + t("embeddings:validation.modelNotFound", { modelId: "", availableModels: "" }).split(":")[0], + ) || + error?.message?.includes( + t("embeddings:validation.apiNotFound", { provider: "", baseUrl: "" }).split(":")[0], + ) || + error?.message?.includes( + t("embeddings:validation.connectionFailed", { provider: "", status: "", statusText: "" }).split( + ":", + )[0], + ) + ) { + throw error + } + + const serialized = 
serializeError(error) + + if (error.message?.includes("fetch failed") || error.message?.includes("ECONNREFUSED")) { + throw new Error(t("embeddings:validation.cannotConnect", { provider: "Ollama", baseUrl })) + } + + const errorDetails = serialized.message || t("embeddings:unknownError") + throw new Error( + t("embeddings:genericError", { + provider: "Ollama", + errorDetails, + }), + ) + } + } } diff --git a/src/services/code-index/embedders/openai-compatible.ts b/src/services/code-index/embedders/openai-compatible.ts index 88eced8a0a..c5c6d106ea 100644 --- a/src/services/code-index/embedders/openai-compatible.ts +++ b/src/services/code-index/embedders/openai-compatible.ts @@ -8,6 +8,7 @@ import { } from "../constants" import { getDefaultModelId, getModelQueryPrefix } from "../../../shared/embeddingModels" import { t } from "../../../i18n" +import { serializeError } from "serialize-error" interface EmbeddingItem { embedding: string | number[] @@ -50,10 +51,10 @@ export class OpenAICompatibleEmbedder implements IEmbedder { */ constructor(baseUrl: string, apiKey: string, modelId?: string, maxItemTokens?: number) { if (!baseUrl) { - throw new Error("Base URL is required for OpenAI Compatible embedder") + throw new Error(t("embeddings:validation.baseUrlRequired", { provider: "OpenAI Compatible" })) } if (!apiKey) { - throw new Error("API key is required for OpenAI Compatible embedder") + throw new Error(t("embeddings:validation.apiKeyRequired", { provider: "OpenAI Compatible" })) } this.baseUrl = baseUrl @@ -68,6 +69,51 @@ export class OpenAICompatibleEmbedder implements IEmbedder { this.maxItemTokens = maxItemTokens || MAX_ITEM_TOKENS } + /** + * Validates the endpoint by attempting a minimal embedding request + * @param baseUrl The base URL to validate + * @param apiKey The API key to use for validation + * @param modelId Optional model ID to test with + * @returns Promise resolving to true if valid + * @throws Error with descriptive message if validation fails + */ 
+ static async validateEndpoint(baseUrl: string, apiKey: string, modelId: string | undefined): Promise<boolean> { + try { + const client = new OpenAI({ + baseURL: baseUrl, + apiKey: apiKey, + }) + + const effectiveModelId = modelId || getDefaultModelId("openai-compatible") + + // Try a minimal embedding request + await client.embeddings.create({ + input: "test", + model: effectiveModelId, + }) + + return true + } catch (error: any) { + const serialized = serializeError(error) + + if (error?.status === 401) { + throw new Error(t("embeddings:authenticationFailed")) + } else if (error?.status === 404) { + throw new Error(t("embeddings:validation.endpointNotFound", { baseUrl })) + } else if (error?.code === "ECONNREFUSED" || error?.code === "ENOTFOUND") { + throw new Error(t("embeddings:validation.cannotConnect", { provider: "OpenAI Compatible", baseUrl })) + } + + const errorDetails = serialized.message || t("embeddings:unknownError") + throw new Error( + t("embeddings:genericError", { + provider: "OpenAI Compatible", + errorDetails, + }), + ) + } + } + /** * Creates embeddings for the given texts with batching and rate limiting * @param texts Array of text strings to embed diff --git a/src/services/code-index/embedders/openai.ts b/src/services/code-index/embedders/openai.ts index 667c2f46d4..495b69397b 100644 --- a/src/services/code-index/embedders/openai.ts +++ b/src/services/code-index/embedders/openai.ts @@ -8,8 +8,9 @@ import { MAX_BATCH_RETRIES as MAX_RETRIES, INITIAL_RETRY_DELAY_MS as INITIAL_DELAY_MS, } from "../constants" -import { getModelQueryPrefix } from "../../../shared/embeddingModels" +import { getModelQueryPrefix, getDefaultModelId } from "../../../shared/embeddingModels" import { t } from "../../../i18n" +import { serializeError } from "serialize-error" /** * OpenAI implementation of the embedder interface with batching and rate limiting @@ -193,4 +194,64 @@ export class OpenAiEmbedder extends OpenAiNativeHandler implements IEmbedder { name: "openai", } } + + 
/** + * Validates the OpenAI configuration by attempting to list models. + * @param apiKey - The OpenAI API key + * @param modelId - The model ID to check + * @returns A promise that resolves to true if valid, or throws an error with details + */ + static async validateEndpoint(apiKey: string, modelId: string | undefined): Promise<boolean> { + const effectiveModelId = modelId || getDefaultModelId("openai") + const client = new OpenAI({ apiKey }) + + try { + // Try to list models to validate the API key + const models = await client.models.list() + const modelIds = models.data.map((m) => m.id) + + // Check if the specified embedding model exists or is a known model + const knownEmbeddingModels = ["text-embedding-3-small", "text-embedding-3-large", "text-embedding-ada-002"] + + if (!modelIds.includes(effectiveModelId) && !knownEmbeddingModels.includes(effectiveModelId)) { + throw new Error( + t("embeddings:validation.modelNotFound", { + modelId: effectiveModelId, + availableModels: knownEmbeddingModels.join(", "), + }), + ) + } + + return true + } catch (error: any) { + // If it's already a translated error, re-throw it + if ( + error?.message?.includes( + t("embeddings:validation.modelNotFound", { modelId: "", availableModels: "" }).split(":")[0], + ) + ) { + throw error + } + + const serialized = serializeError(error) + + if (error?.status === 401) { + throw new Error(t("embeddings:validation.invalidApiKey", { provider: "OpenAI" })) + } + if (error?.status === 429) { + throw new Error(t("embeddings:validation.rateLimitExceeded")) + } + if (error?.message?.includes("fetch failed") || error?.message?.includes("ECONNREFUSED")) { + throw new Error(t("embeddings:validation.networkError")) + } + + const errorDetails = serialized.message || t("embeddings:unknownError") + throw new Error( + t("embeddings:genericError", { + provider: "OpenAI", + errorDetails: `${t("embeddings:validation.configurationFailed", { provider: "OpenAI" })}: ${errorDetails}`, + }), + ) + } + } } diff --git 
a/src/services/code-index/interfaces/manager.ts b/src/services/code-index/interfaces/manager.ts index 70e3fd9765..c34cba1aaf 100644 --- a/src/services/code-index/interfaces/manager.ts +++ b/src/services/code-index/interfaces/manager.ts @@ -72,6 +72,21 @@ export interface ICodeIndexManager { export type IndexingState = "Standby" | "Indexing" | "Indexed" | "Error" export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini" +export interface IndexingStatus { + systemStatus: IndexingState + message?: string + processedItems?: number + totalItems?: number + currentItemUnit?: string + errorDetails?: { + type: "configuration" | "authentication" | "network" | "validation" | "unknown" + message: string + suggestion?: string + endpoint?: string + timestamp: number + } +} + export interface IndexProgressUpdate { systemStatus: IndexingState message?: string diff --git a/src/services/code-index/manager.ts b/src/services/code-index/manager.ts index 735bcee670..f95a5daa0a 100644 --- a/src/services/code-index/manager.ts +++ b/src/services/code-index/manager.ts @@ -12,6 +12,7 @@ import { CacheManager } from "./cache-manager" import fs from "fs/promises" import ignore from "ignore" import path from "path" +import { serializeError } from "serialize-error" export class CodeIndexManager { // --- Singleton Implementation --- @@ -229,31 +230,46 @@ export class CodeIndexManager { console.error("Unexpected error loading .gitignore:", error) } - // (Re)Create shared service instances - const { embedder, vectorStore, scanner, fileWatcher } = this._serviceFactory.createServices( - this.context, - this._cacheManager!, - ignoreInstance, - ) + try { + // (Re)Create shared service instances + const { embedder, vectorStore, scanner, fileWatcher } = this._serviceFactory.createServices( + this.context, + this._cacheManager!, + ignoreInstance, + ) + + // (Re)Initialize orchestrator + this._orchestrator = new CodeIndexOrchestrator( + this._configManager!, + this._stateManager, 
+ this.workspacePath, + this._cacheManager!, + vectorStore, + scanner, + fileWatcher, + ) + + // (Re)Initialize search service + this._searchService = new CodeIndexSearchService( + this._configManager!, + this._stateManager, + embedder, + vectorStore, + ) + } catch (error) { + // Handle service creation errors + console.error("Failed to create code index services:", error) - // (Re)Initialize orchestrator - this._orchestrator = new CodeIndexOrchestrator( - this._configManager!, - this._stateManager, - this.workspacePath, - this._cacheManager!, - vectorStore, - scanner, - fileWatcher, - ) + // Serialize the error for consistent handling + const serializedError = serializeError(error) + const errorMessage = serializedError.message || "Unknown error" - // (Re)Initialize search service - this._searchService = new CodeIndexSearchService( - this._configManager!, - this._stateManager, - embedder, - vectorStore, - ) + // Set error state with serialized error details + this._stateManager.setSystemState("Error", errorMessage, serializedError) + + // Re-throw to be handled by caller + throw error + } } /** @@ -279,4 +295,12 @@ export class CodeIndexManager { } } } + + /** + * Gets the service factory instance for testing configurations. 
+ * @returns The service factory instance or undefined if not initialized + */ + public getServiceFactory(): CodeIndexServiceFactory | undefined { + return this._serviceFactory + } } diff --git a/src/services/code-index/service-factory.ts b/src/services/code-index/service-factory.ts index 2a19c8ebab..08b79c142a 100644 --- a/src/services/code-index/service-factory.ts +++ b/src/services/code-index/service-factory.ts @@ -10,6 +10,7 @@ import { ICodeParser, IEmbedder, IFileWatcher, IVectorStore } from "./interfaces import { CodeIndexConfigManager } from "./config-manager" import { CacheManager } from "./cache-manager" import { Ignore } from "ignore" +import { t } from "../../i18n" /** * Factory class responsible for creating and configuring code indexing service dependencies. @@ -31,7 +32,7 @@ export class CodeIndexServiceFactory { if (provider === "openai") { if (!config.openAiOptions?.openAiNativeApiKey) { - throw new Error("OpenAI configuration missing for embedder creation") + throw new Error(t("codeIndex:openAiApiKeyRequired")) } return new OpenAiEmbedder({ ...config.openAiOptions, @@ -39,7 +40,7 @@ export class CodeIndexServiceFactory { }) } else if (provider === "ollama") { if (!config.ollamaOptions?.ollamaBaseUrl) { - throw new Error("Ollama configuration missing for embedder creation") + throw new Error(t("codeIndex:ollamaBaseUrlRequired")) } return new CodeIndexOllamaEmbedder({ ...config.ollamaOptions, @@ -47,7 +48,10 @@ export class CodeIndexServiceFactory { }) } else if (provider === "openai-compatible") { if (!config.openAiCompatibleOptions?.baseUrl || !config.openAiCompatibleOptions?.apiKey) { - throw new Error("OpenAI Compatible configuration missing for embedder creation") + const missing = [] + if (!config.openAiCompatibleOptions?.baseUrl) missing.push("base URL") + if (!config.openAiCompatibleOptions?.apiKey) missing.push("API key") + throw new Error(t("codeIndex:openAiCompatibleConfigRequired", { missing: missing.join(" and ") })) } return new 
OpenAICompatibleEmbedder( config.openAiCompatibleOptions.baseUrl, @@ -61,7 +65,53 @@ return new GeminiEmbedder(config.geminiOptions.apiKey) } - throw new Error(`Invalid embedder type configured: ${config.embedderProvider}`) + throw new Error(t("codeIndex:invalidEmbedderType", { provider: config.embedderProvider })) + } + + /** + * Validates the embedder configuration by testing the connection. + * @param config - The configuration to validate (optional, defaults to current config) + * @returns A promise that resolves to true if valid, or throws an error with details + */ + public async validateEmbedderConfig(config?: any): Promise<boolean> { + try { + // Use provided config or fall back to current config + const configToValidate = config || this.configManager.getConfig() + const provider = configToValidate.embedderProvider as EmbedderProvider + + if (provider === "openai") { + if (!configToValidate.openAiOptions?.openAiNativeApiKey) { + throw new Error(t("codeIndex:openAiApiKeyRequiredValidation")) + } + return await OpenAiEmbedder.validateEndpoint( + configToValidate.openAiOptions.openAiNativeApiKey, + configToValidate.modelId, + ) + } else if (provider === "ollama") { + if (!configToValidate.ollamaOptions?.ollamaBaseUrl) { + throw new Error(t("codeIndex:ollamaBaseUrlRequiredValidation")) + } + return await CodeIndexOllamaEmbedder.validateEndpoint( + configToValidate.ollamaOptions.ollamaBaseUrl, + configToValidate.modelId, + ) + } else if (provider === "openai-compatible") { + if ( + !configToValidate.openAiCompatibleOptions?.baseUrl || + !configToValidate.openAiCompatibleOptions?.apiKey + ) { + throw new Error(t("codeIndex:openAiCompatibleConfigRequiredValidation")) + } + return await OpenAICompatibleEmbedder.validateEndpoint( + configToValidate.openAiCompatibleOptions.baseUrl, + configToValidate.openAiCompatibleOptions.apiKey, + configToValidate.modelId, + ) + } + throw new Error(t("codeIndex:invalidEmbedderTypeValidation", { 
provider })) + } catch (error) { + throw new Error(t("codeIndex:embedderValidationFailed", { error: error.message })) + } } /** @@ -92,18 +142,16 @@ export class CodeIndexServiceFactory { } if (vectorSize === undefined) { - let errorMessage = `Could not determine vector dimension for model '${modelId}' with provider '${provider}'. ` if (provider === "openai-compatible") { - errorMessage += `Please ensure the 'Embedding Dimension' is correctly set in the OpenAI-Compatible provider settings.` + throw new Error(t("codeIndex:vectorDimensionErrorOpenAiCompatible", { modelId, provider })) } else { - errorMessage += `Check model profiles or configuration.` + throw new Error(t("codeIndex:vectorDimensionErrorGeneral", { modelId, provider })) } - throw new Error(errorMessage) } if (!config.qdrantUrl) { // This check remains important - throw new Error("Qdrant URL missing for vector store creation") + throw new Error(t("codeIndex:qdrantUrlMissing")) } // Assuming constructor is updated: new QdrantVectorStore(workspacePath, url, vectorSize, apiKey?) 
@@ -151,7 +199,7 @@ export class CodeIndexServiceFactory { fileWatcher: IFileWatcher } { if (!this.configManager.isFeatureConfigured) { - throw new Error("Cannot create services: Code indexing is not properly configured") + throw new Error(t("codeIndex:servicesNotConfigured")) } const embedder = this.createEmbedder() diff --git a/src/services/code-index/state-manager.ts b/src/services/code-index/state-manager.ts index 90257fdfb1..71d7cda383 100644 --- a/src/services/code-index/state-manager.ts +++ b/src/services/code-index/state-manager.ts @@ -2,12 +2,21 @@ import * as vscode from "vscode" export type IndexingState = "Standby" | "Indexing" | "Indexed" | "Error" +export interface ErrorDetails { + type: "configuration" | "authentication" | "network" | "validation" | "unknown" + message: string + suggestion?: string + endpoint?: string + timestamp: number +} + export class CodeIndexStateManager { private _systemStatus: IndexingState = "Standby" private _statusMessage: string = "" private _processedItems: number = 0 private _totalItems: number = 0 private _currentItemUnit: string = "blocks" + private _errorDetails: ErrorDetails | undefined = undefined private _progressEmitter = new vscode.EventEmitter>() // --- Public API --- @@ -25,14 +34,17 @@ export class CodeIndexStateManager { processedItems: this._processedItems, totalItems: this._totalItems, currentItemUnit: this._currentItemUnit, + errorDetails: this._errorDetails, } } // --- State Management --- - public setSystemState(newState: IndexingState, message?: string): void { + public setSystemState(newState: IndexingState, message?: string, errorDetails?: ErrorDetails): void { const stateChanged = - newState !== this._systemStatus || (message !== undefined && message !== this._statusMessage) + newState !== this._systemStatus || + (message !== undefined && message !== this._statusMessage) || + (errorDetails !== undefined && errorDetails !== this._errorDetails) if (stateChanged) { this._systemStatus = newState @@ 
-40,6 +52,14 @@ export class CodeIndexStateManager { this._statusMessage = message } + // Handle error details + if (newState === "Error" && errorDetails) { + this._errorDetails = errorDetails + } else if (newState !== "Error") { + // Clear error details when transitioning to non-error states + this._errorDetails = undefined + } + // Reset progress counters if moving to a non-indexing state or starting fresh if (newState !== "Indexing") { this._processedItems = 0 diff --git a/src/shared/ExtensionMessage.ts b/src/shared/ExtensionMessage.ts index a1fb59c89d..e2a40c0333 100644 --- a/src/shared/ExtensionMessage.ts +++ b/src/shared/ExtensionMessage.ts @@ -32,6 +32,13 @@ export interface IndexingStatus { processedItems: number totalItems: number currentItemUnit?: string + errorDetails?: { + type: "configuration" | "authentication" | "network" | "validation" | "unknown" + message: string + suggestion?: string + endpoint?: string + timestamp: number + } } export interface IndexingStatusUpdateMessage { @@ -100,6 +107,7 @@ export interface ExtensionMessage { | "indexingStatusUpdate" | "indexCleared" | "codebaseIndexConfig" + | "codebaseIndexTestResult" | "marketplaceInstallResult" | "marketplaceData" | "shareTaskSuccess" @@ -154,6 +162,7 @@ export interface ExtensionMessage { marketplaceInstalledMetadata?: MarketplaceInstalledMetadata visibility?: ShareVisibility rulesFolderPath?: string + message?: string // For test results and other messages } export type ExtensionState = Pick< diff --git a/src/shared/WebviewMessage.ts b/src/shared/WebviewMessage.ts index 4cd0541828..3245c37d36 100644 --- a/src/shared/WebviewMessage.ts +++ b/src/shared/WebviewMessage.ts @@ -163,6 +163,7 @@ export interface WebviewMessage { | "indexCleared" | "focusPanelRequest" | "codebaseIndexConfig" + | "codebaseIndexTestResult" | "profileThresholds" | "setHistoryPreviewCollapsed" | "openExternal" @@ -223,6 +224,7 @@ export interface WebviewMessage { visibility?: ShareVisibility // For share visibility 
hasContent?: boolean // For checkRulesDirectoryResult checkOnly?: boolean // For deleteCustomMode check + action?: string // For actions like "test" in codebaseIndexConfig } export const checkoutDiffPayloadSchema = z.object({ @@ -245,6 +247,16 @@ export type CheckpointRestorePayload = z.infer = ({ codebaseIndexModels, @@ -59,7 +59,20 @@ export const CodeIndexSettings: React.FC = ({ }) => { const { t } = useAppTranslation() const DEFAULT_QDRANT_URL = "http://localhost:6333" - const [indexingStatus, setIndexingStatus] = useState({ + const [indexingStatus, setIndexingStatus] = useState<{ + systemStatus: string + message: string + processedItems: number + totalItems: number + currentItemUnit: string + errorDetails?: { + type: "configuration" | "authentication" | "network" | "validation" | "unknown" + message: string + suggestion?: string + endpoint?: string + timestamp: number + } + }>({ systemStatus: "Standby", message: "", processedItems: 0, @@ -67,6 +80,8 @@ export const CodeIndexSettings: React.FC = ({ currentItemUnit: "items", }) const [advancedExpanded, setAdvancedExpanded] = useState(false) + const [testingConfig, setTestingConfig] = useState(false) + const [testResult, setTestResult] = useState<{ success: boolean; message: string } | null>(null) // Safely calculate available models for current provider const currentProvider = codebaseIndexConfig?.codebaseIndexEmbedderProvider @@ -83,15 +98,25 @@ export const CodeIndexSettings: React.FC = ({ // Set up interval for periodic status updates // Set up message listener for status updates - const handleMessage = (event: MessageEvent) => { + const handleMessage = (event: MessageEvent) => { if (event.data.type === "indexingStatusUpdate") { + const data = event.data as IndexingStatusUpdateMessage setIndexingStatus({ - systemStatus: event.data.values.systemStatus, - message: event.data.values.message || "", - processedItems: event.data.values.processedItems, - totalItems: event.data.values.totalItems, - currentItemUnit: 
event.data.values.currentItemUnit || "items", + systemStatus: data.values.systemStatus, + message: data.values.message || "", + processedItems: data.values.processedItems || 0, + totalItems: data.values.totalItems || 0, + currentItemUnit: data.values.currentItemUnit || "items", + errorDetails: data.values.errorDetails, + }) + } else if (event.data.type === "codebaseIndexTestResult") { + setTestingConfig(false) + setTestResult({ + success: event.data.success || false, + message: event.data.message || "Test completed", }) + // Clear test result after 5 seconds + setTimeout(() => setTestResult(null), 5000) } } @@ -240,6 +265,43 @@ export const CodeIndexSettings: React.FC = ({ {indexingStatus.message ? ` - ${indexingStatus.message}` : ""} + {/* Error Details Display */} + {indexingStatus.systemStatus === "Error" && indexingStatus.errorDetails && ( +
+
+ +
+
+ {indexingStatus.errorDetails.type === "configuration" && + t("settings:codeIndex.configurationError")} + {indexingStatus.errorDetails.type === "authentication" && + t("settings:codeIndex.authenticationError")} + {indexingStatus.errorDetails.type === "network" && + t("settings:codeIndex.networkError")} + {indexingStatus.errorDetails.type === "validation" && + t("settings:codeIndex.validationError")} + {indexingStatus.errorDetails.type === "unknown" && + t("settings:codeIndex.unknownError")} +
+
+ {indexingStatus.errorDetails.message} +
+ {indexingStatus.errorDetails.suggestion && ( +
+ + {indexingStatus.errorDetails.suggestion} +
+ )} + {indexingStatus.errorDetails.endpoint && ( +
+ Endpoint: {indexingStatus.errorDetails.endpoint} +
+ )} +
+
+
+ )} + {indexingStatus.systemStatus === "Indexing" && (
= ({ )}
+ { + setTestingConfig(true) + setTestResult(null) + vscode.postMessage({ + type: "codebaseIndexConfig", + action: "test", + values: { + ...codebaseIndexConfig, + // Include API configuration values based on provider + ...(codebaseIndexConfig?.codebaseIndexEmbedderProvider === "openai" && { + codeIndexOpenAiKey: apiConfiguration.codeIndexOpenAiKey, + }), + ...(codebaseIndexConfig?.codebaseIndexEmbedderProvider === + "openai-compatible" && { + codebaseIndexOpenAiCompatibleBaseUrl: + apiConfiguration.codebaseIndexOpenAiCompatibleBaseUrl, + codebaseIndexOpenAiCompatibleApiKey: + apiConfiguration.codebaseIndexOpenAiCompatibleApiKey, + codebaseIndexOpenAiCompatibleModelDimension: + apiConfiguration.codebaseIndexOpenAiCompatibleModelDimension, + }), + ...(codebaseIndexConfig?.codebaseIndexEmbedderProvider === "ollama" && { + codebaseIndexEmbedderBaseUrl: + codebaseIndexConfig.codebaseIndexEmbedderBaseUrl, + }), + }, + }) + }} + disabled={ + testingConfig || + !areSettingsCommitted || + !validateIndexingConfig(codebaseIndexConfig, apiConfiguration) + } + appearance="secondary"> + {testingConfig + ? t("settings:codeIndex.testingButton") + : t("settings:codeIndex.testConfigButton")} + {(indexingStatus.systemStatus === "Error" || indexingStatus.systemStatus === "Standby") && ( vscode.postMessage({ type: "startIndexing" })} @@ -541,6 +642,26 @@ export const CodeIndexSettings: React.FC = ({ )}
+ {/* Test Result Display */} + {testResult && ( +
+
+ + {testResult.message} +
+
+ )} + {/* Advanced Configuration Section */}