diff --git a/packages/types/src/codebase-index.ts b/packages/types/src/codebase-index.ts
index 0ad19d8676..ffe9774609 100644
--- a/packages/types/src/codebase-index.ts
+++ b/packages/types/src/codebase-index.ts
@@ -21,7 +21,7 @@ export const CODEBASE_INDEX_DEFAULTS = {
 export const codebaseIndexConfigSchema = z.object({
 	codebaseIndexEnabled: z.boolean().optional(),
 	codebaseIndexQdrantUrl: z.string().optional(),
-	codebaseIndexEmbedderProvider: z.enum(["openai", "ollama", "openai-compatible", "gemini"]).optional(),
+	codebaseIndexEmbedderProvider: z.enum(["openai", "ollama", "openai-compatible", "gemini", "lmstudio"]).optional(),
 	codebaseIndexEmbedderBaseUrl: z.string().optional(),
 	codebaseIndexEmbedderModelId: z.string().optional(),
 	codebaseIndexEmbedderModelDimension: z.number().optional(),
@@ -47,6 +47,7 @@ export const codebaseIndexModelsSchema = z.object({
 	ollama: z.record(z.string(), z.object({ dimension: z.number() })).optional(),
 	"openai-compatible": z.record(z.string(), z.object({ dimension: z.number() })).optional(),
 	gemini: z.record(z.string(), z.object({ dimension: z.number() })).optional(),
+	lmstudio: z.record(z.string(), z.object({ dimension: z.number() })).optional(),
 })
 
 export type CodebaseIndexModels = z.infer<typeof codebaseIndexModelsSchema>
 
@@ -62,6 +63,7 @@ export const codebaseIndexProviderSchema = z.object({
 	codebaseIndexOpenAiCompatibleApiKey: z.string().optional(),
 	codebaseIndexOpenAiCompatibleModelDimension: z.number().optional(),
 	codebaseIndexGeminiApiKey: z.string().optional(),
+	codebaseIndexLmStudioBaseUrl: z.string().optional(),
 })
 
 export type CodebaseIndexProvider = z.infer<typeof codebaseIndexProviderSchema>
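A quick sanity check of what this schema change accepts, as a minimal sketch (the reduced schema below is a stand-in for the full `codebaseIndexConfigSchema`, not the exported one):

```typescript
// Minimal sketch of the behavior the schema change enables; this reduced enum is a
// stand-in for the full codebaseIndexConfigSchema exported above.
import { z } from "zod"

const providerSchema = z.enum(["openai", "ollama", "openai-compatible", "gemini", "lmstudio"])

console.log(providerSchema.safeParse("lmstudio").success) // true (newly accepted)
console.log(providerSchema.safeParse("bedrock").success) // false (still rejected)
```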
diff --git a/src/i18n/locales/ca/embeddings.json b/src/i18n/locales/ca/embeddings.json
index 5deed252bf..07535ca92d 100644
--- a/src/i18n/locales/ca/embeddings.json
+++ b/src/i18n/locales/ca/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "El model d'Ollama no és capaç de fer incrustacions: {{modelId}}",
 		"hostNotFound": "No s'ha trobat l'amfitrió d'Ollama: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "El servidor de LM Studio no s'està executant a {{baseUrl}}. Assegureu-vos que LM Studio s'estigui executant amb el servidor local activat.",
+		"modelNotFound": "No s'ha trobat el model \"{{modelId}}\" a LM Studio. Assegureu-vos que el model estigui carregat a LM Studio.",
+		"hostNotFound": "No es pot connectar a LM Studio a {{baseUrl}}. Comproveu la configuració de l'URL base."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "Error desconegut en processar el fitxer {{filePath}}",
 		"unknownErrorDeletingPoints": "Error desconegut en eliminar els punts per a {{filePath}}",
diff --git a/src/i18n/locales/de/embeddings.json b/src/i18n/locales/de/embeddings.json
index 74381747e1..d7cb72cf3e 100644
--- a/src/i18n/locales/de/embeddings.json
+++ b/src/i18n/locales/de/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Ollama-Modell ist nicht für Einbettungen geeignet: {{modelId}}",
 		"hostNotFound": "Ollama-Host nicht gefunden: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "Der LM Studio-Server wird unter {{baseUrl}} nicht ausgeführt. Bitte stelle sicher, dass LM Studio mit aktiviertem lokalen Server läuft.",
+		"modelNotFound": "Modell \"{{modelId}}\" in LM Studio nicht gefunden. Bitte stelle sicher, dass das Modell in LM Studio geladen ist.",
+		"hostNotFound": "Verbindung zu LM Studio unter {{baseUrl}} nicht möglich. Bitte überprüfe die Konfiguration der Basis-URL."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "Unbekannter Fehler beim Verarbeiten der Datei {{filePath}}",
 		"unknownErrorDeletingPoints": "Unbekannter Fehler beim Löschen der Punkte für {{filePath}}",
diff --git a/src/i18n/locales/en/embeddings.json b/src/i18n/locales/en/embeddings.json
index 96b3b2dbea..4def64d454 100644
--- a/src/i18n/locales/en/embeddings.json
+++ b/src/i18n/locales/en/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Ollama model is not embedding capable: {{modelId}}",
 		"hostNotFound": "Ollama host not found: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "LM Studio server is not running at {{baseUrl}}. Please ensure LM Studio is running with the local server enabled.",
+		"modelNotFound": "Model \"{{modelId}}\" not found in LM Studio. Please ensure the model is loaded in LM Studio.",
+		"hostNotFound": "Cannot connect to LM Studio at {{baseUrl}}. Please check the base URL configuration."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "Unknown error processing file {{filePath}}",
 		"unknownErrorDeletingPoints": "Unknown error deleting points for {{filePath}}",
diff --git a/src/i18n/locales/es/embeddings.json b/src/i18n/locales/es/embeddings.json
index e47db420eb..acfd43b95b 100644
--- a/src/i18n/locales/es/embeddings.json
+++ b/src/i18n/locales/es/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "El modelo Ollama no es capaz de realizar incrustaciones: {{modelId}}",
 		"hostNotFound": "No se encuentra el host de Ollama: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "El servidor de LM Studio no se está ejecutando en {{baseUrl}}. Asegúrate de que LM Studio se esté ejecutando con el servidor local habilitado.",
+		"modelNotFound": "No se encontró el modelo \"{{modelId}}\" en LM Studio. Asegúrate de que el modelo esté cargado en LM Studio.",
+		"hostNotFound": "No se puede conectar a LM Studio en {{baseUrl}}. Comprueba la configuración de la URL base."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "Error desconocido procesando archivo {{filePath}}",
 		"unknownErrorDeletingPoints": "Error desconocido eliminando puntos para {{filePath}}",
diff --git a/src/i18n/locales/fr/embeddings.json b/src/i18n/locales/fr/embeddings.json
index c63d3a7fbc..eb836706a1 100644
--- a/src/i18n/locales/fr/embeddings.json
+++ b/src/i18n/locales/fr/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Le modèle Ollama n'est pas capable d'intégrer : {{modelId}}",
 		"hostNotFound": "Hôte Ollama introuvable : {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "Le serveur LM Studio n'est pas en cours d'exécution à {{baseUrl}}. Veuillez vous assurer que LM Studio est en cours d'exécution avec le serveur local activé.",
+		"modelNotFound": "Le modèle \"{{modelId}}\" n'a pas été trouvé dans LM Studio. Veuillez vous assurer que le modèle est chargé dans LM Studio.",
+		"hostNotFound": "Impossible de se connecter à LM Studio à {{baseUrl}}. Veuillez vérifier la configuration de l'URL de base."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "Erreur inconnue lors du traitement du fichier {{filePath}}",
 		"unknownErrorDeletingPoints": "Erreur inconnue lors de la suppression des points pour {{filePath}}",
diff --git a/src/i18n/locales/hi/embeddings.json b/src/i18n/locales/hi/embeddings.json
index 15709fd700..e44db49231 100644
--- a/src/i18n/locales/hi/embeddings.json
+++ b/src/i18n/locales/hi/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "ओलामा मॉडल एम्बेडिंग में सक्षम नहीं है: {{modelId}}",
 		"hostNotFound": "ओलामा होस्ट नहीं मिला: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "एलएम स्टूडियो सर्वर {{baseUrl}} पर नहीं चल रहा है। कृपया सुनिश्चित करें कि एलएम स्टूडियो स्थानीय सर्वर सक्षम के साथ चल रहा है।",
+		"modelNotFound": "एलएम स्टूडियो में मॉडल \"{{modelId}}\" नहीं मिला। कृपया सुनिश्चित करें कि मॉडल एलएम स्टूडियो में लोड किया गया है।",
+		"hostNotFound": "{{baseUrl}} पर एलएम स्टूडियो से कनेक्ट नहीं हो सकता। कृपया आधार यूआरएल कॉन्फ़िगरेशन की जांच करें।"
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "फ़ाइल {{filePath}} प्रसंस्करण में अज्ञात त्रुटि",
 		"unknownErrorDeletingPoints": "{{filePath}} के लिए बिंदु हटाने में अज्ञात त्रुटि",
diff --git a/src/i18n/locales/id/embeddings.json b/src/i18n/locales/id/embeddings.json
index e78d39d1ab..be49925b46 100644
--- a/src/i18n/locales/id/embeddings.json
+++ b/src/i18n/locales/id/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Model Ollama tidak mampu melakukan embedding: {{modelId}}",
 		"hostNotFound": "Host Ollama tidak ditemukan: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "Server LM Studio tidak berjalan di {{baseUrl}}. Pastikan LM Studio berjalan dengan server lokal diaktifkan.",
+		"modelNotFound": "Model \"{{modelId}}\" tidak ditemukan di LM Studio. Pastikan model dimuat di LM Studio.",
+		"hostNotFound": "Tidak dapat terhubung ke LM Studio di {{baseUrl}}. Silakan periksa konfigurasi URL dasar."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "Error tidak dikenal saat memproses file {{filePath}}",
 		"unknownErrorDeletingPoints": "Error tidak dikenal saat menghapus points untuk {{filePath}}",
diff --git a/src/i18n/locales/it/embeddings.json b/src/i18n/locales/it/embeddings.json
index 679b17a25e..16e7e30640 100644
--- a/src/i18n/locales/it/embeddings.json
+++ b/src/i18n/locales/it/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Il modello Ollama non è in grado di eseguire l'embedding: {{modelId}}",
 		"hostNotFound": "Host Ollama non trovato: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "Il server di LM Studio non è in esecuzione su {{baseUrl}}. Assicurati che LM Studio sia in esecuzione con il server locale abilitato.",
+		"modelNotFound": "Modello \"{{modelId}}\" non trovato in LM Studio. Assicurati che il modello sia caricato in LM Studio.",
+		"hostNotFound": "Impossibile connettersi a LM Studio su {{baseUrl}}. Controlla la configurazione dell'URL di base."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "Errore sconosciuto nell'elaborazione del file {{filePath}}",
 		"unknownErrorDeletingPoints": "Errore sconosciuto nell'eliminazione dei punti per {{filePath}}",
diff --git a/src/i18n/locales/ja/embeddings.json b/src/i18n/locales/ja/embeddings.json
index 89136eb1cc..dea8087395 100644
--- a/src/i18n/locales/ja/embeddings.json
+++ b/src/i18n/locales/ja/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Ollamaモデルは埋め込みに対応していません:{{modelId}}",
 		"hostNotFound": "Ollamaホストが見つかりません:{{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "LM Studioサーバーが{{baseUrl}}で実行されていません。LM Studioがローカルサーバーを有効にして実行されていることを確認してください。",
+		"modelNotFound": "モデル「{{modelId}}」がLM Studioで見つかりません。モデルがLM Studioにロードされていることを確認してください。",
+		"hostNotFound": "{{baseUrl}}のLM Studioに接続できません。ベースURLの構成を確認してください。"
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "ファイル{{filePath}}の処理中に不明なエラーが発生しました",
 		"unknownErrorDeletingPoints": "{{filePath}}のポイント削除中に不明なエラーが発生しました",
diff --git a/src/i18n/locales/ko/embeddings.json b/src/i18n/locales/ko/embeddings.json
index 7129883ad7..1bbb7cc5dd 100644
--- a/src/i18n/locales/ko/embeddings.json
+++ b/src/i18n/locales/ko/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Ollama 모델은 임베딩이 불가능합니다: {{modelId}}",
 		"hostNotFound": "Ollama 호스트를 찾을 수 없습니다: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "LM Studio 서버가 {{baseUrl}}에서 실행되고 있지 않습니다. 로컬 서버가 활성화된 상태로 LM Studio가 실행 중인지 확인하세요.",
+		"modelNotFound": "LM Studio에서 \"{{modelId}}\" 모델을 찾을 수 없습니다. 모델이 LM Studio에 로드되었는지 확인하세요.",
+		"hostNotFound": "{{baseUrl}}에서 LM Studio에 연결할 수 없습니다. 기본 URL 구성을 확인하세요."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "파일 {{filePath}} 처리 중 알 수 없는 오류",
 		"unknownErrorDeletingPoints": "{{filePath}}의 포인트 삭제 중 알 수 없는 오류",
diff --git a/src/i18n/locales/nl/embeddings.json b/src/i18n/locales/nl/embeddings.json
index ede20774ac..71b450e5a4 100644
--- a/src/i18n/locales/nl/embeddings.json
+++ b/src/i18n/locales/nl/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Ollama-model is niet in staat tot insluiten: {{modelId}}",
 		"hostNotFound": "Ollama-host niet gevonden: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "LM Studio-server draait niet op {{baseUrl}}. Zorg ervoor dat LM Studio draait met de lokale server ingeschakeld.",
+		"modelNotFound": "Model \"{{modelId}}\" niet gevonden in LM Studio. Zorg ervoor dat het model in LM Studio is geladen.",
+		"hostNotFound": "Kan geen verbinding maken met LM Studio op {{baseUrl}}. Controleer de basis-URL-configuratie."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "Onbekende fout bij verwerken van bestand {{filePath}}",
 		"unknownErrorDeletingPoints": "Onbekende fout bij verwijderen van punten voor {{filePath}}",
diff --git a/src/i18n/locales/pl/embeddings.json b/src/i18n/locales/pl/embeddings.json
index 70279021bd..cfa777bc64 100644
--- a/src/i18n/locales/pl/embeddings.json
+++ b/src/i18n/locales/pl/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Model Ollama nie jest zdolny do osadzania: {{modelId}}",
 		"hostNotFound": "Nie znaleziono hosta Ollama: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "Serwer LM Studio nie działa pod adresem {{baseUrl}}. Upewnij się, że LM Studio jest uruchomione z włączonym serwerem lokalnym.",
+		"modelNotFound": "Nie znaleziono modelu \"{{modelId}}\" w LM Studio. Upewnij się, że model jest załadowany w LM Studio.",
+		"hostNotFound": "Nie można połączyć się z LM Studio pod adresem {{baseUrl}}. Sprawdź konfigurację podstawowego adresu URL."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "Nieznany błąd podczas przetwarzania pliku {{filePath}}",
 		"unknownErrorDeletingPoints": "Nieznany błąd podczas usuwania punktów dla {{filePath}}",
diff --git a/src/i18n/locales/pt-BR/embeddings.json b/src/i18n/locales/pt-BR/embeddings.json
index aea1bb5007..4089ac8c48 100644
--- a/src/i18n/locales/pt-BR/embeddings.json
+++ b/src/i18n/locales/pt-BR/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "O modelo Ollama não é capaz de embedding: {{modelId}}",
 		"hostNotFound": "Host Ollama não encontrado: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "O servidor do LM Studio não está em execução em {{baseUrl}}. Certifique-se de que o LM Studio esteja em execução com o servidor local ativado.",
+		"modelNotFound": "Modelo \"{{modelId}}\" não encontrado no LM Studio. Certifique-se de que o modelo esteja carregado no LM Studio.",
+		"hostNotFound": "Não é possível conectar-se ao LM Studio em {{baseUrl}}. Verifique a configuração do URL base."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "Erro desconhecido ao processar arquivo {{filePath}}",
 		"unknownErrorDeletingPoints": "Erro desconhecido ao deletar pontos para {{filePath}}",
diff --git a/src/i18n/locales/ru/embeddings.json b/src/i18n/locales/ru/embeddings.json
index a724539b76..abae7892a4 100644
--- a/src/i18n/locales/ru/embeddings.json
+++ b/src/i18n/locales/ru/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Модель Ollama не способна к вложению: {{modelId}}",
 		"hostNotFound": "Хост Ollama не найден: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "Сервер LM Studio не запущен по адресу {{baseUrl}}. Убедитесь, что LM Studio запущен с включенным локальным сервером.",
+		"modelNotFound": "Модель \"{{modelId}}\" не найдена в LM Studio. Убедитесь, что модель загружена в LM Studio.",
+		"hostNotFound": "Не удается подключиться к LM Studio по адресу {{baseUrl}}. Проверьте конфигурацию базового URL-адреса."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "Неизвестная ошибка при обработке файла {{filePath}}",
 		"unknownErrorDeletingPoints": "Неизвестная ошибка при удалении точек для {{filePath}}",
diff --git a/src/i18n/locales/tr/embeddings.json b/src/i18n/locales/tr/embeddings.json
index 3e115ce103..a09d2f12e1 100644
--- a/src/i18n/locales/tr/embeddings.json
+++ b/src/i18n/locales/tr/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Ollama modeli gömme yeteneğine sahip değil: {{modelId}}",
 		"hostNotFound": "Ollama ana bilgisayarı bulunamadı: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "LM Studio sunucusu {{baseUrl}} adresinde çalışmıyor. Lütfen LM Studio'nun yerel sunucu etkinken çalıştığından emin olun.",
+		"modelNotFound": "LM Studio'da \"{{modelId}}\" modeli bulunamadı. Lütfen modelin LM Studio'da yüklü olduğundan emin olun.",
+		"hostNotFound": "LM Studio'ya {{baseUrl}} adresinden bağlanılamıyor. Lütfen temel URL yapılandırmasını kontrol edin."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "{{filePath}} dosyası işlenirken bilinmeyen hata",
 		"unknownErrorDeletingPoints": "{{filePath}} için noktalar silinirken bilinmeyen hata",
diff --git a/src/i18n/locales/vi/embeddings.json b/src/i18n/locales/vi/embeddings.json
index 9ef61105fa..509f4034d9 100644
--- a/src/i18n/locales/vi/embeddings.json
+++ b/src/i18n/locales/vi/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Mô hình Ollama không có khả năng nhúng: {{modelId}}",
 		"hostNotFound": "Không tìm thấy máy chủ Ollama: {{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "Máy chủ LM Studio không chạy tại {{baseUrl}}. Vui lòng đảm bảo LM Studio đang chạy với máy chủ cục bộ được bật.",
+		"modelNotFound": "Không tìm thấy mô hình \"{{modelId}}\" trong LM Studio. Vui lòng đảm bảo mô hình đã được tải trong LM Studio.",
+		"hostNotFound": "Không thể kết nối với LM Studio tại {{baseUrl}}. Vui lòng kiểm tra cấu hình URL cơ sở."
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "Lỗi không xác định khi xử lý tệp {{filePath}}",
 		"unknownErrorDeletingPoints": "Lỗi không xác định khi xóa điểm cho {{filePath}}",
diff --git a/src/i18n/locales/zh-CN/embeddings.json b/src/i18n/locales/zh-CN/embeddings.json
index d3ded6e5a2..ab296c2efb 100644
--- a/src/i18n/locales/zh-CN/embeddings.json
+++ b/src/i18n/locales/zh-CN/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Ollama 模型不具备嵌入能力:{{modelId}}",
 		"hostNotFound": "未找到 Ollama 主机:{{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "LM Studio 服务器未在 {{baseUrl}} 运行。请确保 LM Studio 已启用本地服务器并正在运行。",
+		"modelNotFound": "在 LM Studio 中未找到模型“{{modelId}}”。请确保该模型已在 LM Studio 中加载。",
+		"hostNotFound": "无法连接到 {{baseUrl}} 上的 LM Studio。请检查基本 URL 配置。"
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "处理文件 {{filePath}} 时出现未知错误",
 		"unknownErrorDeletingPoints": "删除 {{filePath}} 的数据点时出现未知错误",
diff --git a/src/i18n/locales/zh-TW/embeddings.json b/src/i18n/locales/zh-TW/embeddings.json
index 5ab5dcb292..a33790592c 100644
--- a/src/i18n/locales/zh-TW/embeddings.json
+++ b/src/i18n/locales/zh-TW/embeddings.json
@@ -17,6 +17,11 @@
 		"modelNotEmbeddingCapable": "Ollama 模型不具備內嵌能力:{{modelId}}",
 		"hostNotFound": "找不到 Ollama 主機:{{baseUrl}}"
 	},
+	"lmstudio": {
+		"serviceNotRunning": "LM Studio 伺服器未在 {{baseUrl}} 執行。請確保 LM Studio 已啟用本機伺服器並正在執行。",
+		"modelNotFound": "在 LM Studio 中找不到模型「{{modelId}}」。請確保該模型已在 LM Studio 中載入。",
+		"hostNotFound": "無法連線至 {{baseUrl}} 上的 LM Studio。請檢查基礎 URL 設定。"
+	},
 	"scanner": {
 		"unknownErrorProcessingFile": "處理檔案 {{filePath}} 時發生未知錯誤",
 		"unknownErrorDeletingPoints": "刪除 {{filePath}} 的資料點時發生未知錯誤",
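Every locale adds the same three keys under the `lmstudio` block of the `embeddings` namespace. A sketch of how a key resolves at runtime, assuming the project's i18next-style `t` helper and the `{{param}}` interpolation shown above (the import path is illustrative):

```typescript
// Sketch only: resolving one of the new keys. The import path is illustrative;
// the project wires up i18n elsewhere. Interpolation uses the {{param}} syntax above.
import { t } from "../../i18n"

const message = t("embeddings:lmstudio.serviceNotRunning", {
	baseUrl: "http://localhost:1234",
})
// => "LM Studio server is not running at http://localhost:1234. Please ensure ..."
```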
diff --git a/src/services/code-index/__tests__/config-manager.spec.ts b/src/services/code-index/__tests__/config-manager.spec.ts
index 641abfa306..d55c0f1447 100644
--- a/src/services/code-index/__tests__/config-manager.spec.ts
+++ b/src/services/code-index/__tests__/config-manager.spec.ts
@@ -50,6 +50,7 @@ describe("CodeIndexConfigManager", () => {
 				modelId: undefined,
 				openAiOptions: { openAiNativeApiKey: "" },
 				ollamaOptions: { ollamaBaseUrl: "" },
+				lmStudioOptions: { lmStudioBaseUrl: "" },
 				qdrantUrl: "http://localhost:6333",
 				qdrantApiKey: "",
 				searchMinScore: 0.4,
@@ -81,6 +82,9 @@ describe("CodeIndexConfigManager", () => {
 				modelId: "text-embedding-3-large",
 				openAiOptions: { openAiNativeApiKey: "test-openai-key" },
 				ollamaOptions: { ollamaBaseUrl: "" },
+				openAiCompatibleOptions: undefined,
+				geminiOptions: undefined,
+				lmStudioOptions: { lmStudioBaseUrl: "" },
 				qdrantUrl: "http://qdrant.local",
 				qdrantApiKey: "test-qdrant-key",
 				searchMinScore: 0.4,
@@ -117,7 +121,10 @@ describe("CodeIndexConfigManager", () => {
 				openAiCompatibleOptions: {
 					baseUrl: "https://api.example.com/v1",
 					apiKey: "test-openai-compatible-key",
+					modelDimension: undefined,
 				},
+				geminiOptions: undefined,
+				lmStudioOptions: { lmStudioBaseUrl: "" },
 				qdrantUrl: "http://qdrant.local",
 				qdrantApiKey: "test-qdrant-key",
 				searchMinScore: 0.4,
@@ -156,6 +163,8 @@ describe("CodeIndexConfigManager", () => {
 					baseUrl: "https://api.example.com/v1",
 					apiKey: "test-openai-compatible-key",
 				},
+				geminiOptions: undefined,
+				lmStudioOptions: { lmStudioBaseUrl: "" },
 				qdrantUrl: "http://qdrant.local",
 				qdrantApiKey: "test-qdrant-key",
 				searchMinScore: 0.4,
@@ -192,8 +201,10 @@ describe("CodeIndexConfigManager", () => {
 				openAiCompatibleOptions: {
 					baseUrl: "https://api.example.com/v1",
 					apiKey: "test-openai-compatible-key",
-					// modelDimension is undefined when not set
+					modelDimension: undefined,
 				},
+				geminiOptions: undefined,
+				lmStudioOptions: { lmStudioBaseUrl: "" },
 				qdrantUrl: "http://qdrant.local",
 				qdrantApiKey: "test-qdrant-key",
 				searchMinScore: 0.4,
@@ -233,6 +244,9 @@ describe("CodeIndexConfigManager", () => {
 					apiKey: "test-openai-compatible-key",
 				},
 				geminiOptions: undefined,
+				lmStudioOptions: {
+					lmStudioBaseUrl: "",
+				},
 				qdrantUrl: "http://qdrant.local",
 				qdrantApiKey: "test-qdrant-key",
 				searchMinScore: 0.4,
@@ -1206,8 +1220,9 @@ describe("CodeIndexConfigManager", () => {
 				modelId: "text-embedding-3-large",
 				openAiOptions: { openAiNativeApiKey: "test-openai-key" },
 				ollamaOptions: { ollamaBaseUrl: undefined },
-				geminiOptions: undefined,
 				openAiCompatibleOptions: undefined,
+				geminiOptions: undefined,
+				lmStudioOptions: { lmStudioBaseUrl: undefined },
 				qdrantUrl: "http://qdrant.local",
 				qdrantApiKey: "test-qdrant-key",
 				searchMinScore: 0.4,
diff --git a/src/services/code-index/__tests__/service-factory.spec.ts b/src/services/code-index/__tests__/service-factory.spec.ts
index 65932225eb..beb1d5d072 100644
--- a/src/services/code-index/__tests__/service-factory.spec.ts
+++ b/src/services/code-index/__tests__/service-factory.spec.ts
@@ -4,6 +4,7 @@ import { OpenAiEmbedder } from "../embedders/openai"
 import { CodeIndexOllamaEmbedder } from "../embedders/ollama"
 import { OpenAICompatibleEmbedder } from "../embedders/openai-compatible"
 import { GeminiEmbedder } from "../embedders/gemini"
+import { CodeIndexLmStudioEmbedder } from "../embedders/lmstudio"
 import { QdrantVectorStore } from "../vector-store/qdrant-client"
 
 // Mock the embedders and vector store
@@ -11,6 +12,7 @@ vitest.mock("../embedders/openai")
 vitest.mock("../embedders/ollama")
 vitest.mock("../embedders/openai-compatible")
 vitest.mock("../embedders/gemini")
+vitest.mock("../embedders/lmstudio")
 vitest.mock("../vector-store/qdrant-client")
 
 // Mock the embedding models module
@@ -23,6 +25,7 @@ const MockedOpenAiEmbedder = OpenAiEmbedder as MockedClass<typeof OpenAiEmbedder>
 const MockedOpenAICompatibleEmbedder = OpenAICompatibleEmbedder as MockedClass<typeof OpenAICompatibleEmbedder>
 const MockedGeminiEmbedder = GeminiEmbedder as MockedClass<typeof GeminiEmbedder>
+const MockedCodeIndexLmStudioEmbedder = CodeIndexLmStudioEmbedder as MockedClass<typeof CodeIndexLmStudioEmbedder>
 const MockedQdrantVectorStore = QdrantVectorStore as MockedClass<typeof QdrantVectorStore>
 
 // Import the mocked functions
@@ -299,6 +302,77 @@ describe("CodeIndexServiceFactory", () => {
 		expect(() => factory.createEmbedder()).toThrow("serviceFactory.geminiConfigMissing")
 	})
 
+	it("should pass model ID to LM Studio embedder when using LM Studio provider", () => {
+		// Arrange
+		const testModelId = "nomic-embed-text-v1.5"
+		const testConfig = {
+			embedderProvider: "lmstudio",
+			modelId: testModelId,
+			lmStudioOptions: {
+				lmStudioBaseUrl: "http://localhost:1234",
+			},
+		}
+		mockConfigManager.getConfig.mockReturnValue(testConfig as any)
+
+		// Act
+		factory.createEmbedder()
+
+		// Assert
+		expect(MockedCodeIndexLmStudioEmbedder).toHaveBeenCalledWith({
+			lmStudioBaseUrl: "http://localhost:1234",
+			embeddingModelId: testModelId,
+		})
+	})
+
+	it("should handle undefined model ID for LM Studio embedder", () => {
+		// Arrange
+		const testConfig = {
+			embedderProvider: "lmstudio",
+			modelId: undefined,
+			lmStudioOptions: {
+				lmStudioBaseUrl: "http://localhost:1234",
+			},
+		}
+		mockConfigManager.getConfig.mockReturnValue(testConfig as any)
+
+		// Act
+		factory.createEmbedder()
+
+		// Assert
+		expect(MockedCodeIndexLmStudioEmbedder).toHaveBeenCalledWith({
+			lmStudioBaseUrl: "http://localhost:1234",
+			embeddingModelId: undefined,
+		})
+	})
+
+	it("should throw error when LM Studio base URL is missing", () => {
+		// Arrange
+		const testConfig = {
+			embedderProvider: "lmstudio",
+			modelId: "nomic-embed-text-v1.5",
+			lmStudioOptions: {
+				lmStudioBaseUrl: undefined,
+			},
+		}
+		mockConfigManager.getConfig.mockReturnValue(testConfig as any)
+
+		// Act & Assert
+		expect(() => factory.createEmbedder()).toThrow("LM Studio configuration missing for embedder creation")
+	})
+
+	it("should throw error when LM Studio options are missing", () => {
+		// Arrange
+		const testConfig = {
+			embedderProvider: "lmstudio",
+			modelId: "nomic-embed-text-v1.5",
+			lmStudioOptions: undefined,
+		}
+		mockConfigManager.getConfig.mockReturnValue(testConfig as any)
+
+		// Act & Assert
+		expect(() => factory.createEmbedder()).toThrow("LM Studio configuration missing for embedder creation")
+	})
+
 	it("should throw error for invalid embedder provider", () => {
 		// Arrange
 		const testConfig = {
@@ -522,6 +596,31 @@ describe("CodeIndexServiceFactory", () => {
 		)
 	})
 
+	it("should use config.modelId for LM Studio provider", () => {
+		// Arrange
+		const testModelId = "nomic-embed-text-v1.5"
+		const testConfig = {
+			embedderProvider: "lmstudio",
+			modelId: testModelId,
+			qdrantUrl: "http://localhost:6333",
+			qdrantApiKey: "test-key",
+		}
+		mockConfigManager.getConfig.mockReturnValue(testConfig as any)
+		mockGetModelDimension.mockReturnValue(768)
+
+		// Act
+		factory.createVectorStore()
+
+		// Assert
+		expect(mockGetModelDimension).toHaveBeenCalledWith("lmstudio", testModelId)
+		expect(MockedQdrantVectorStore).toHaveBeenCalledWith(
+			"/test/workspace",
+			"http://localhost:6333",
+			768,
+			"test-key",
+		)
+	})
+
 	it("should use default model when config.modelId is undefined", () => {
 		// Arrange
 		const testConfig = {
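These tests pin down the constructor contract: the factory forwards the base URL from `lmStudioOptions` and the configured `modelId` as `embeddingModelId`. A sketch of the resulting call, with example values (the import path is illustrative):

```typescript
// Sketch of the constructor contract the tests above exercise (import path illustrative).
import { CodeIndexLmStudioEmbedder } from "./embedders/lmstudio"

const embedder = new CodeIndexLmStudioEmbedder({
	lmStudioBaseUrl: "http://localhost:1234", // taken from config.lmStudioOptions
	embeddingModelId: "nomic-embed-text-v1.5", // taken from config.modelId; may be undefined
})
```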
diff --git a/src/services/code-index/config-manager.ts b/src/services/code-index/config-manager.ts
index 245621a1bd..adee27156f 100644
--- a/src/services/code-index/config-manager.ts
+++ b/src/services/code-index/config-manager.ts
@@ -17,6 +17,7 @@ export class CodeIndexConfigManager {
 	private ollamaOptions?: ApiHandlerOptions
 	private openAiCompatibleOptions?: { baseUrl: string; apiKey: string }
 	private geminiOptions?: { apiKey: string }
+	private lmStudioOptions?: ApiHandlerOptions
 	private qdrantUrl?: string = "http://localhost:6333"
 	private qdrantApiKey?: string
 	private searchMinScore?: number
@@ -92,13 +93,15 @@ export class CodeIndexConfigManager {
 		this.openAiOptions = { openAiNativeApiKey: openAiKey }
 
-		// Set embedder provider with support for openai-compatible
+		// Set embedder provider with support for all providers
 		if (codebaseIndexEmbedderProvider === "ollama") {
 			this.embedderProvider = "ollama"
 		} else if (codebaseIndexEmbedderProvider === "openai-compatible") {
 			this.embedderProvider = "openai-compatible"
 		} else if (codebaseIndexEmbedderProvider === "gemini") {
 			this.embedderProvider = "gemini"
+		} else if (codebaseIndexEmbedderProvider === "lmstudio") {
+			this.embedderProvider = "lmstudio"
 		} else {
 			this.embedderProvider = "openai"
 		}
@@ -118,6 +121,10 @@ export class CodeIndexConfigManager {
 			: undefined
 
 		this.geminiOptions = geminiApiKey ? { apiKey: geminiApiKey } : undefined
+
+		this.lmStudioOptions = {
+			lmStudioBaseUrl: codebaseIndexEmbedderBaseUrl,
+		}
 	}
 
 	/**
@@ -134,6 +141,7 @@ export class CodeIndexConfigManager {
 		ollamaOptions?: ApiHandlerOptions
 		openAiCompatibleOptions?: { baseUrl: string; apiKey: string }
 		geminiOptions?: { apiKey: string }
+		lmStudioOptions?: ApiHandlerOptions
 		qdrantUrl?: string
 		qdrantApiKey?: string
 		searchMinScore?: number
@@ -152,6 +160,7 @@ export class CodeIndexConfigManager {
 			openAiCompatibleBaseUrl: this.openAiCompatibleOptions?.baseUrl ?? "",
 			openAiCompatibleApiKey: this.openAiCompatibleOptions?.apiKey ?? "",
 			geminiApiKey: this.geminiOptions?.apiKey ?? "",
+			lmStudioBaseUrl: this.lmStudioOptions?.lmStudioBaseUrl ?? "",
 			qdrantUrl: this.qdrantUrl ?? "",
 			qdrantApiKey: this.qdrantApiKey ?? "",
 		}
@@ -175,6 +184,7 @@ export class CodeIndexConfigManager {
 			ollamaOptions: this.ollamaOptions,
 			openAiCompatibleOptions: this.openAiCompatibleOptions,
 			geminiOptions: this.geminiOptions,
+			lmStudioOptions: this.lmStudioOptions,
 			qdrantUrl: this.qdrantUrl,
 			qdrantApiKey: this.qdrantApiKey,
 			searchMinScore: this.currentSearchMinScore,
@@ -207,6 +217,12 @@ export class CodeIndexConfigManager {
 			const qdrantUrl = this.qdrantUrl
 			const isConfigured = !!(apiKey && qdrantUrl)
 			return isConfigured
+		} else if (this.embedderProvider === "lmstudio") {
+			// The LM Studio model ID has a default, so only the base URL is strictly required for config
+			const lmStudioBaseUrl = this.lmStudioOptions?.lmStudioBaseUrl
+			const qdrantUrl = this.qdrantUrl
+			const isConfigured = !!(lmStudioBaseUrl && qdrantUrl)
+			return isConfigured
 		}
 		return false // Should not happen if embedderProvider is always set correctly
 	}
@@ -240,6 +256,7 @@ export class CodeIndexConfigManager {
 		const prevOpenAiCompatibleApiKey = prev?.openAiCompatibleApiKey ?? ""
 		const prevModelDimension = prev?.modelDimension
 		const prevGeminiApiKey = prev?.geminiApiKey ?? ""
+		const prevLmStudioBaseUrl = prev?.lmStudioBaseUrl ?? ""
 		const prevQdrantUrl = prev?.qdrantUrl ?? ""
 		const prevQdrantApiKey = prev?.qdrantApiKey ?? ""
 
@@ -269,6 +286,7 @@ export class CodeIndexConfigManager {
 		const currentOpenAiCompatibleApiKey = this.openAiCompatibleOptions?.apiKey ?? ""
 		const currentModelDimension = this.modelDimension
 		const currentGeminiApiKey = this.geminiOptions?.apiKey ?? ""
+		const currentLmStudioBaseUrl = this.lmStudioOptions?.lmStudioBaseUrl ?? ""
 		const currentQdrantUrl = this.qdrantUrl ?? ""
 		const currentQdrantApiKey = this.qdrantApiKey ?? ""
 
@@ -292,6 +310,14 @@
 			return true
 		}
 
+		if (prevGeminiApiKey !== currentGeminiApiKey) {
+			return true
+		}
+
+		if (prevLmStudioBaseUrl !== currentLmStudioBaseUrl) {
+			return true
+		}
+
 		if (prevQdrantUrl !== currentQdrantUrl || prevQdrantApiKey !== currentQdrantApiKey) {
 			return true
 		}
@@ -343,6 +369,7 @@ export class CodeIndexConfigManager {
 			ollamaOptions: this.ollamaOptions,
 			openAiCompatibleOptions: this.openAiCompatibleOptions,
 			geminiOptions: this.geminiOptions,
+			lmStudioOptions: this.lmStudioOptions,
 			qdrantUrl: this.qdrantUrl,
 			qdrantApiKey: this.qdrantApiKey,
 			searchMinScore: this.currentSearchMinScore,
response has no data", async () => { + const mockResponse = { + data: [], + usage: { prompt_tokens: 0, total_tokens: 0 }, + } + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("embeddings:validation.invalidResponse") + }) + + it("should fail validation when LM Studio is not running (ECONNREFUSED)", async () => { + const error = new Error("ECONNREFUSED") + ;(error as any).code = "ECONNREFUSED" + mockEmbeddingsCreate.mockRejectedValue(error) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("lmstudio.serviceNotRunning") + }) + + it("should fail validation when model is not found (404)", async () => { + const error = new Error("HTTP 404: Not Found") + ;(error as any).status = 404 + mockEmbeddingsCreate.mockRejectedValue(error) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("lmstudio.modelNotFound") + }) + + it("should fail validation when host is not found (ENOTFOUND)", async () => { + const error = new Error("ENOTFOUND") + ;(error as any).code = "ENOTFOUND" + mockEmbeddingsCreate.mockRejectedValue(error) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("lmstudio.hostNotFound") + }) + + it("should handle generic errors with standard error handling", async () => { + const error = new Error("Unknown error") + mockEmbeddingsCreate.mockRejectedValue(error) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + + expect(result.error).toBe("embeddings:validation.configurationError") + }) + + it("should handle fetch failed errors", async () => { + const error = new Error("fetch failed") + mockEmbeddingsCreate.mockRejectedValue(error) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("lmstudio.serviceNotRunning") + }) + }) + + describe("createEmbeddings", () => { + it("should create embeddings successfully", async () => { + const mockResponse = { + data: [{ embedding: [0.1, 0.2, 0.3] }, { embedding: [0.4, 0.5, 0.6] }], + usage: { prompt_tokens: 10, total_tokens: 10 }, + } + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + const result = await embedder.createEmbeddings(["test1", "test2"]) + + expect(result.embeddings).toEqual([ + [0.1, 0.2, 0.3], + [0.4, 0.5, 0.6], + ]) + expect(result.usage).toEqual({ promptTokens: 10, totalTokens: 10 }) + }) + + it("should handle rate limit errors with retry", async () => { + const error = new Error("Rate limit exceeded") + ;(error as any).status = 429 + + mockEmbeddingsCreate.mockRejectedValueOnce(error) + + const mockResponse = { + data: [{ embedding: [0.1, 0.2, 0.3] }], + usage: { prompt_tokens: 5, total_tokens: 5 }, + } + mockEmbeddingsCreate.mockResolvedValueOnce(mockResponse) + + const result = await embedder.createEmbeddings(["test"]) + + expect(result.embeddings).toEqual([[0.1, 0.2, 0.3]]) + expect(mockEmbeddingsCreate).toHaveBeenCalledTimes(2) + }) + + it("should skip texts that exceed token limit", async () => { + const longText = "a".repeat(100000) + const shortText = "short text" + + const mockResponse = { + data: [{ embedding: [0.1, 0.2, 0.3] }], + usage: { prompt_tokens: 3, total_tokens: 3 }, + } + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + const result = await 
embedder.createEmbeddings([longText, shortText]) + + expect(result.embeddings).toEqual([[0.1, 0.2, 0.3]]) + expect(consoleMocks.warn).toHaveBeenCalledWith(expect.stringContaining("exceeds maximum token limit")) + }) + }) +}) diff --git a/src/services/code-index/embedders/__tests__/ollama.spec.ts b/src/services/code-index/embedders/__tests__/ollama.spec.ts index 7d625a83fe..25a3b3865b 100644 --- a/src/services/code-index/embedders/__tests__/ollama.spec.ts +++ b/src/services/code-index/embedders/__tests__/ollama.spec.ts @@ -5,38 +5,6 @@ import { CodeIndexOllamaEmbedder } from "../ollama" // Mock fetch global.fetch = vitest.fn() as MockedFunction -// Mock i18n -vitest.mock("../../../../i18n", () => ({ - t: (key: string, params?: Record) => { - const translations: Record = { - "embeddings:validation.serviceUnavailable": - "The embedder service is not available. Please ensure it is running and accessible.", - "embeddings:validation.modelNotAvailable": - "The specified model is not available. Please check your model configuration.", - "embeddings:validation.connectionFailed": - "Failed to connect to the embedder service. Please check your connection settings and ensure the service is running.", - "embeddings:validation.configurationError": "Invalid embedder configuration. Please review your settings.", - "embeddings:errors.ollama.serviceNotRunning": - "Ollama service is not running at {{baseUrl}}. Please start Ollama first.", - "embeddings:errors.ollama.serviceUnavailable": - "Ollama service is unavailable at {{baseUrl}}. HTTP status: {{status}}", - "embeddings:errors.ollama.modelNotFound": - "Model '{{model}}' not found. Available models: {{availableModels}}", - "embeddings:errors.ollama.modelNotEmbedding": "Model '{{model}}' is not embedding capable", - "embeddings:errors.ollama.hostNotFound": "Ollama host not found: {{baseUrl}}", - "embeddings:errors.ollama.connectionTimeout": "Connection to Ollama timed out at {{baseUrl}}", - } - // Handle parameter substitution - let result = translations[key] || key - if (params) { - Object.entries(params).forEach(([param, value]) => { - result = result.replace(new RegExp(`{{${param}}}`, "g"), String(value)) - }) - } - return result - }, -})) - // Mock console methods const consoleMocks = { error: vitest.spyOn(console, "error").mockImplementation(() => {}), @@ -127,7 +95,7 @@ describe("CodeIndexOllamaEmbedder", () => { const result = await embedder.validateConfiguration() expect(result.valid).toBe(false) - expect(result.error).toBe("embeddings:ollama.serviceNotRunning") + expect(result.error).toBe("ollama.serviceNotRunning") }) it("should fail validation when tags endpoint returns 404", async () => { @@ -141,7 +109,7 @@ describe("CodeIndexOllamaEmbedder", () => { const result = await embedder.validateConfiguration() expect(result.valid).toBe(false) - expect(result.error).toBe("embeddings:ollama.serviceNotRunning") + expect(result.error).toBe("ollama.serviceNotRunning") }) it("should fail validation when tags endpoint returns other error", async () => { @@ -155,7 +123,7 @@ describe("CodeIndexOllamaEmbedder", () => { const result = await embedder.validateConfiguration() expect(result.valid).toBe(false) - expect(result.error).toBe("embeddings:ollama.serviceUnavailable") + expect(result.error).toBe("ollama.serviceUnavailable") }) it("should fail validation when model does not exist", async () => { @@ -174,7 +142,7 @@ describe("CodeIndexOllamaEmbedder", () => { const result = await embedder.validateConfiguration() expect(result.valid).toBe(false) - 
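The oversized-text test leans on the embedder's character-based token estimate of roughly four characters per token. A worked sketch, using a placeholder limit since the real `MAX_ITEM_TOKENS` constant lives in `../constants`:

```typescript
// Worked sketch of the size check the test exercises. MAX_ITEM_TOKENS here is a
// placeholder value; the real constant is imported from ../constants.
const MAX_ITEM_TOKENS = 8191

const longText = "a".repeat(100000)
const estimatedTokens = Math.ceil(longText.length / 4) // 25000

console.log(estimatedTokens > MAX_ITEM_TOKENS) // true, so the text is skipped with a warning
```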
expect(result.error).toBe("embeddings:ollama.modelNotFound") + expect(result.error).toBe("ollama.modelNotFound") }) it("should fail validation when model exists but doesn't support embeddings", async () => { @@ -201,7 +169,7 @@ describe("CodeIndexOllamaEmbedder", () => { const result = await embedder.validateConfiguration() expect(result.valid).toBe(false) - expect(result.error).toBe("embeddings:ollama.modelNotEmbeddingCapable") + expect(result.error).toBe("ollama.modelNotEmbeddingCapable") }) it("should handle ECONNREFUSED errors", async () => { @@ -210,7 +178,7 @@ describe("CodeIndexOllamaEmbedder", () => { const result = await embedder.validateConfiguration() expect(result.valid).toBe(false) - expect(result.error).toBe("embeddings:ollama.serviceNotRunning") + expect(result.error).toBe("ollama.serviceNotRunning") }) it("should handle ENOTFOUND errors", async () => { @@ -219,7 +187,7 @@ describe("CodeIndexOllamaEmbedder", () => { const result = await embedder.validateConfiguration() expect(result.valid).toBe(false) - expect(result.error).toBe("embeddings:ollama.hostNotFound") + expect(result.error).toBe("ollama.hostNotFound") }) it("should handle generic network errors", async () => { @@ -228,7 +196,7 @@ describe("CodeIndexOllamaEmbedder", () => { const result = await embedder.validateConfiguration() expect(result.valid).toBe(false) - expect(result.error).toBe("Network timeout") + expect(result.error).toBe("embeddings:validation.configurationError") }) }) }) diff --git a/src/services/code-index/embedders/lmstudio.ts b/src/services/code-index/embedders/lmstudio.ts new file mode 100644 index 0000000000..726df62443 --- /dev/null +++ b/src/services/code-index/embedders/lmstudio.ts @@ -0,0 +1,220 @@ +import { OpenAI } from "openai" +import { ApiHandlerOptions } from "../../../shared/api" +import { IEmbedder, EmbeddingResponse, EmbedderInfo } from "../interfaces" +import { + MAX_BATCH_TOKENS, + MAX_ITEM_TOKENS, + MAX_BATCH_RETRIES as MAX_RETRIES, + INITIAL_RETRY_DELAY_MS as INITIAL_DELAY_MS, +} from "../constants" +import { withValidationErrorHandling, formatEmbeddingError, HttpError } from "../shared/validation-helpers" +import { t } from "../../../i18n" + +/** + * LM Studio implementation of the embedder interface with batching and rate limiting. + * Uses OpenAI-compatible API endpoints with a custom base URL. 
diff --git a/src/services/code-index/embedders/lmstudio.ts b/src/services/code-index/embedders/lmstudio.ts
new file mode 100644
index 0000000000..726df62443
--- /dev/null
+++ b/src/services/code-index/embedders/lmstudio.ts
@@ -0,0 +1,220 @@
+import { OpenAI } from "openai"
+import { ApiHandlerOptions } from "../../../shared/api"
+import { IEmbedder, EmbeddingResponse, EmbedderInfo } from "../interfaces"
+import {
+	MAX_BATCH_TOKENS,
+	MAX_ITEM_TOKENS,
+	MAX_BATCH_RETRIES as MAX_RETRIES,
+	INITIAL_RETRY_DELAY_MS as INITIAL_DELAY_MS,
+} from "../constants"
+import { withValidationErrorHandling, formatEmbeddingError, HttpError } from "../shared/validation-helpers"
+import { t } from "../../../i18n"
+
+/**
+ * LM Studio implementation of the embedder interface with batching and rate limiting.
+ * Uses OpenAI-compatible API endpoints with a custom base URL.
+ */
+export class CodeIndexLmStudioEmbedder implements IEmbedder {
+	protected options: ApiHandlerOptions
+	private embeddingsClient: OpenAI
+	private readonly defaultModelId: string
+
+	/**
+	 * Creates a new LM Studio embedder
+	 * @param options API handler options including lmStudioBaseUrl
+	 */
+	constructor(options: ApiHandlerOptions & { embeddingModelId?: string }) {
+		this.options = options
+
+		// Normalize base URL to prevent duplicate /v1 if user already provided it
+		let baseUrl = this.options.lmStudioBaseUrl || "http://localhost:1234"
+		if (!baseUrl.endsWith("/v1")) {
+			baseUrl = baseUrl + "/v1"
+		}
+
+		this.embeddingsClient = new OpenAI({
+			baseURL: baseUrl,
+			apiKey: "noop", // API key is intentionally hardcoded to "noop" because LM Studio does not require authentication
+		})
+		this.defaultModelId = options.embeddingModelId || "text-embedding-nomic-embed-text-v1.5@f16"
+	}
+
+	/**
+	 * Creates embeddings for the given texts with batching and rate limiting
+	 * @param texts Array of text strings to embed
+	 * @param model Optional model identifier
+	 * @returns Promise resolving to embedding response
+	 */
+	async createEmbeddings(texts: string[], model?: string): Promise<EmbeddingResponse> {
+		const modelToUse = model || this.defaultModelId
+		const allEmbeddings: number[][] = []
+		const usage = { promptTokens: 0, totalTokens: 0 }
+		const remainingTexts = [...texts]
+
+		while (remainingTexts.length > 0) {
+			const currentBatch: string[] = []
+			let currentBatchTokens = 0
+			const processedIndices: number[] = []
+
+			for (let i = 0; i < remainingTexts.length; i++) {
+				const text = remainingTexts[i]
+				const itemTokens = Math.ceil(text.length / 4)
+
+				if (itemTokens > MAX_ITEM_TOKENS) {
+					console.warn(
+						`Text at index ${i} exceeds maximum token limit (${itemTokens} > ${MAX_ITEM_TOKENS}). Skipping.`,
+					)
+					processedIndices.push(i)
+					continue
+				}
+
+				if (currentBatchTokens + itemTokens <= MAX_BATCH_TOKENS) {
+					currentBatch.push(text)
+					currentBatchTokens += itemTokens
+					processedIndices.push(i)
+				} else {
+					break
+				}
+			}
+
+			// Remove processed items from remainingTexts (in reverse order to maintain correct indices)
+			for (let i = processedIndices.length - 1; i >= 0; i--) {
+				remainingTexts.splice(processedIndices[i], 1)
+			}
+
+			if (currentBatch.length > 0) {
+				const batchResult = await this._embedBatchWithRetries(currentBatch, modelToUse)
+				allEmbeddings.push(...batchResult.embeddings)
+				usage.promptTokens += batchResult.usage.promptTokens
+				usage.totalTokens += batchResult.usage.totalTokens
+			}
+		}
+
+		return { embeddings: allEmbeddings, usage }
+	}
+
+	/**
+	 * Helper method to handle batch embedding with retries and exponential backoff
+	 * @param batchTexts Array of texts to embed in this batch
+	 * @param model Model identifier to use
+	 * @returns Promise resolving to embeddings and usage statistics
+	 */
+	private async _embedBatchWithRetries(
+		batchTexts: string[],
+		model: string,
+	): Promise<{ embeddings: number[][]; usage: { promptTokens: number; totalTokens: number } }> {
+		for (let attempts = 0; attempts < MAX_RETRIES; attempts++) {
+			try {
+				const response = await this.embeddingsClient.embeddings.create({
+					input: batchTexts,
+					model: model,
+					encoding_format: "float",
+				})
+
+				return {
+					embeddings: response.data.map((item) => item.embedding),
+					usage: {
+						promptTokens: response.usage?.prompt_tokens || 0,
+						totalTokens: response.usage?.total_tokens || 0,
+					},
+				}
+			} catch (error: any) {
+				const hasMoreAttempts = attempts < MAX_RETRIES - 1
+
+				// Check if it's a rate limit error
+				const httpError = error as HttpError
+				if (httpError?.status === 429 && hasMoreAttempts) {
+					const delayMs = INITIAL_DELAY_MS * Math.pow(2, attempts)
+					await new Promise((resolve) => setTimeout(resolve, delayMs))
+					continue
+				}
+
+				throw error
+			}
+		}
+
+		throw new Error(t("embeddings:failedMaxAttempts", { attempts: MAX_RETRIES }))
+	}
+
+	/**
+	 * Validates the LM Studio embedder configuration by testing connectivity and model availability
+	 * @returns Promise resolving to validation result with success status and optional error message
+	 */
+	async validateConfiguration(): Promise<{ valid: boolean; error?: string }> {
+		return withValidationErrorHandling(
+			async () => {
+				// Test with a minimal embedding request
+				const testTexts = ["test"]
+				const modelToUse = this.defaultModelId
+
+				try {
+					const response = await this.embeddingsClient.embeddings.create({
+						input: testTexts,
+						model: modelToUse,
+						encoding_format: "float",
+					})
+
+					// Check if we got a valid response
+					if (!response.data || response.data.length === 0) {
+						return {
+							valid: false,
+							error: "embeddings:validation.invalidResponse",
+						}
+					}
+
+					return { valid: true }
+				} catch (error: any) {
+					// Handle LM Studio specific errors
+					if (error?.message?.includes("ECONNREFUSED") || error?.code === "ECONNREFUSED") {
+						return {
+							valid: false,
+							error: "lmstudio.serviceNotRunning",
+						}
+					}
+
+					if (error?.status === 404 || error?.message?.includes("404")) {
+						return {
+							valid: false,
+							error: "lmstudio.modelNotFound",
+						}
+					}
+
+					// Re-throw to let standard error handling take over
+					throw error
+				}
+			},
+			"lmstudio",
+			{
+				beforeStandardHandling: (error: any) => {
+					// Handle LM Studio-specific connection errors
+					if (
+						error?.message?.includes("fetch failed") ||
+						error?.code === "ECONNREFUSED" ||
+						error?.message?.includes("ECONNREFUSED")
+					) {
+						return {
+							valid: false,
+							error: "lmstudio.serviceNotRunning",
+						}
+					}
+
+					if (error?.code === "ENOTFOUND" || error?.message?.includes("ENOTFOUND")) {
+						return {
+							valid: false,
+							error: "lmstudio.hostNotFound",
+						}
+					}
+
+					// Let standard handling take over
+					return undefined
+				},
+			},
+		)
+	}
+
+	get embedderInfo(): EmbedderInfo {
+		return {
+			name: "lmstudio",
+		}
+	}
+}
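A sketch of driving the new embedder end to end (example base URL and model; top-level `await` assumed, and the import path is illustrative):

```typescript
// Usage sketch with example values (import path illustrative; top-level await assumed).
import { CodeIndexLmStudioEmbedder } from "./embedders/lmstudio"

const embedder = new CodeIndexLmStudioEmbedder({
	lmStudioBaseUrl: "http://localhost:1234", // "/v1" is appended automatically if missing
	embeddingModelId: "text-embedding-nomic-embed-text-v1.5@f16",
})

const check = await embedder.validateConfiguration()
if (!check.valid) throw new Error(check.error)

const { embeddings, usage } = await embedder.createEmbeddings(["function add(a, b) { return a + b }"])
console.log(embeddings[0].length, usage.totalTokens) // e.g. 768 and a small token count
```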
diff --git a/src/services/code-index/interfaces/config.ts b/src/services/code-index/interfaces/config.ts
index 190a23e2a3..ea44e404de 100644
--- a/src/services/code-index/interfaces/config.ts
+++ b/src/services/code-index/interfaces/config.ts
@@ -13,6 +13,7 @@ export interface CodeIndexConfig {
 	ollamaOptions?: ApiHandlerOptions
 	openAiCompatibleOptions?: { baseUrl: string; apiKey: string }
 	geminiOptions?: { apiKey: string }
+	lmStudioOptions?: ApiHandlerOptions
 	qdrantUrl?: string
 	qdrantApiKey?: string
 	searchMinScore?: number
@@ -33,6 +34,7 @@ export type PreviousConfigSnapshot = {
 	openAiCompatibleBaseUrl?: string
 	openAiCompatibleApiKey?: string
 	geminiApiKey?: string
+	lmStudioBaseUrl?: string
 	qdrantUrl?: string
 	qdrantApiKey?: string
 }
diff --git a/src/services/code-index/interfaces/embedder.ts b/src/services/code-index/interfaces/embedder.ts
index 0a74446d5e..58532ff478 100644
--- a/src/services/code-index/interfaces/embedder.ts
+++ b/src/services/code-index/interfaces/embedder.ts
@@ -28,7 +28,7 @@ export interface EmbeddingResponse {
 	}
 }
 
-export type AvailableEmbedders = "openai" | "ollama" | "openai-compatible" | "gemini"
+export type AvailableEmbedders = "openai" | "ollama" | "openai-compatible" | "gemini" | "lmstudio"
 
 export interface EmbedderInfo {
 	name: AvailableEmbedders
diff --git a/src/services/code-index/interfaces/manager.ts b/src/services/code-index/interfaces/manager.ts
index 70e3fd9765..4e96aef30b 100644
--- a/src/services/code-index/interfaces/manager.ts
+++ b/src/services/code-index/interfaces/manager.ts
@@ -70,7 +70,15 @@ export interface ICodeIndexManager {
 }
 
 export type IndexingState = "Standby" | "Indexing" | "Indexed" | "Error"
-export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini"
+
+/**
+ * Supported embedder providers for code indexing.
+ * To add a new provider:
+ * 1. Add the provider name to this union type
+ * 2. Update the switch statements in CodeIndexConfigManager
+ * 3. Add provider-specific configuration options
+ */
+export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini" | "lmstudio"
 
 export interface IndexProgressUpdate {
 	systemStatus: IndexingState
diff --git a/src/services/code-index/service-factory.ts b/src/services/code-index/service-factory.ts
index 818dafb497..1fe6a9b4a4 100644
--- a/src/services/code-index/service-factory.ts
+++ b/src/services/code-index/service-factory.ts
@@ -3,6 +3,7 @@ import { OpenAiEmbedder } from "./embedders/openai"
 import { CodeIndexOllamaEmbedder } from "./embedders/ollama"
 import { OpenAICompatibleEmbedder } from "./embedders/openai-compatible"
 import { GeminiEmbedder } from "./embedders/gemini"
+import { CodeIndexLmStudioEmbedder } from "./embedders/lmstudio"
 import { EmbedderProvider, getDefaultModelId, getModelDimension } from "../../shared/embeddingModels"
 import { QdrantVectorStore } from "./vector-store/qdrant-client"
 import { codeParser, DirectoryScanner, FileWatcher } from "./processors"
@@ -62,6 +63,14 @@ export class CodeIndexServiceFactory {
 				throw new Error(t("embeddings:serviceFactory.geminiConfigMissing"))
 			}
 			return new GeminiEmbedder(config.geminiOptions.apiKey)
+		} else if (provider === "lmstudio") {
+			if (!config.lmStudioOptions?.lmStudioBaseUrl) {
+				throw new Error("LM Studio configuration missing for embedder creation")
+			}
+			return new CodeIndexLmStudioEmbedder({
+				...config.lmStudioOptions,
+				embeddingModelId: config.modelId,
+			})
 		}
 
 		throw new Error(
diff --git a/src/services/code-index/shared/validation-helpers.ts b/src/services/code-index/shared/validation-helpers.ts
index 0182264579..6f3ee7670a 100644
--- a/src/services/code-index/shared/validation-helpers.ts
+++ b/src/services/code-index/shared/validation-helpers.ts
@@ -146,13 +146,8 @@ export function handleValidationError(
 		}
 	}
 
-	// For generic errors, preserve the original error message if it's not a standard one
-	if (errorMessage && errorMessage !== "Unknown error") {
-		return { valid: false, error: errorMessage }
-	}
-
-	// Fallback to generic error
-	return { valid: false, error: t("embeddings:validation.configurationError") }
+	// For generic errors, always return the translation key for consistency
+	return { valid: false, error: "embeddings:validation.configurationError" }
 }
 
 /**
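Following the checklist added to `manager.ts`, a new provider comes down to an `IEmbedder` implementation plus a factory branch. A minimal skeleton with a hypothetical provider name; local copies of the interface shapes keep the sketch self-contained, and a real embedder would add batching, retries, and i18n error keys:

```typescript
// Skeleton for a hypothetical new provider, following the manager.ts checklist.
// The interface shapes below are local stand-ins so the sketch compiles on its own;
// real code would import IEmbedder and extend AvailableEmbedders (step 1) instead.
interface EmbeddingResponse {
	embeddings: number[][]
	usage: { promptTokens: number; totalTokens: number }
}

interface IEmbedder {
	createEmbeddings(texts: string[], model?: string): Promise<EmbeddingResponse>
	validateConfiguration(): Promise<{ valid: boolean; error?: string }>
	embedderInfo: { name: string }
}

class MyProviderEmbedder implements IEmbedder {
	async createEmbeddings(texts: string[]): Promise<EmbeddingResponse> {
		// Step 3: call the provider's embeddings API using its provider-specific options.
		return { embeddings: texts.map(() => []), usage: { promptTokens: 0, totalTokens: 0 } }
	}

	async validateConfiguration(): Promise<{ valid: boolean; error?: string }> {
		// Probe the service once and map failures to translation keys, as lmstudio.ts does.
		return { valid: true }
	}

	get embedderInfo() {
		return { name: "myprovider" }
	}
}
```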
diff --git a/src/shared/embeddingModels.ts b/src/shared/embeddingModels.ts
index 4c6bc24319..09570765a8 100644
--- a/src/shared/embeddingModels.ts
+++ b/src/shared/embeddingModels.ts
@@ -2,7 +2,7 @@
  * Defines profiles for different embedding models, including their dimensions.
  */
 
-export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini" // Add other providers as needed
+export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini" | "lmstudio" // Add other providers as needed
 
 export interface EmbeddingModelProfile {
 	dimension: number
@@ -49,6 +49,11 @@ export const EMBEDDING_MODEL_PROFILES: EmbeddingModelProfiles = {
 	gemini: {
 		"text-embedding-004": { dimension: 768 },
 	},
+	lmstudio: {
+		"text-embedding-nomic-embed-text-v1.5@f16": { dimension: 768 },
+		"text-embedding-nomic-embed-text-v1.5@f32": { dimension: 768 },
+		"text-embedding-mxbai-embed-large-v1": { dimension: 1024 },
+	},
 }
 
 /**
@@ -136,6 +141,19 @@ export function getDefaultModelId(provider: EmbedderProvider): string {
 		case "gemini":
 			return "text-embedding-004"
 
+		case "lmstudio": {
+			// Choose a sensible default for LM Studio, e.g., the first one listed or a specific one
+			const lmStudioModels = EMBEDDING_MODEL_PROFILES.lmstudio
+			const defaultLmStudioModel = lmStudioModels && Object.keys(lmStudioModels)[0]
+			if (defaultLmStudioModel) {
+				return defaultLmStudioModel
+			}
+			// Fallback if no LM Studio models are defined (shouldn't happen with the constant)
+			console.warn("No default LM Studio model found in profiles.")
+			// Return a placeholder or throw an error, depending on desired behavior
+			return "unknown-default" // Placeholder specific model ID
+		}
+
 		default:
 			// Fallback for unknown providers
 			console.warn(`Unknown provider for default model ID: ${provider}. Falling back to OpenAI default.`)
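With the profiles registered, the lookups the service factory performs work the same as for the other providers. A quick sketch (import path illustrative):

```typescript
// Sketch of the lookups the service factory performs when sizing the Qdrant collection.
import { getDefaultModelId, getModelDimension } from "./shared/embeddingModels"

console.log(getDefaultModelId("lmstudio")) // "text-embedding-nomic-embed-text-v1.5@f16" (first profile key)
console.log(getModelDimension("lmstudio", "text-embedding-mxbai-embed-large-v1")) // 1024
```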
diff --git a/webview-ui/src/components/settings/providers/LMStudio.tsx b/webview-ui/src/components/settings/providers/LMStudio.tsx
index a907e43e1b..f0167508ff 100644
--- a/webview-ui/src/components/settings/providers/LMStudio.tsx
+++ b/webview-ui/src/components/settings/providers/LMStudio.tsx
@@ -56,47 +56,38 @@ export const LMStudio = ({ apiConfiguration, setApiConfigurationField }: LMStudi
 		vscode.postMessage({ type: "requestLmStudioModels" })
 	}, [])
 
+	// Reusable function to check if a model is available
+	const checkModelAvailability = useCallback(
+		(modelId: string | undefined): boolean => {
+			if (!modelId) return false
+
+			// Check if model exists in local LM Studio models
+			if (lmStudioModels.length > 0 && lmStudioModels.includes(modelId)) {
+				return false // Model is available locally
+			}
+
+			// If we have router models data for LM Studio
+			if (routerModels.data?.lmstudio) {
+				const availableModels = Object.keys(routerModels.data.lmstudio)
+				// Show warning if model is not in the list (regardless of how many models there are)
+				return !availableModels.includes(modelId)
+			}
+
+			// If neither source has loaded yet, don't show warning
+			return false
+		},
+		[lmStudioModels, routerModels.data],
+	)
+
 	// Check if the selected model exists in the fetched models
 	const modelNotAvailable = useMemo(() => {
-		const selectedModel = apiConfiguration?.lmStudioModelId
-		if (!selectedModel) return false
-
-		// Check if model exists in local LM Studio models
-		if (lmStudioModels.length > 0 && lmStudioModels.includes(selectedModel)) {
-			return false // Model is available locally
-		}
-
-		// If we have router models data for LM Studio
-		if (routerModels.data?.lmstudio) {
-			const availableModels = Object.keys(routerModels.data.lmstudio)
-			// Show warning if model is not in the list (regardless of how many models there are)
-			return !availableModels.includes(selectedModel)
-		}
-
-		// If neither source has loaded yet, don't show warning
-		return false
-	}, [apiConfiguration?.lmStudioModelId, routerModels.data, lmStudioModels])
+		return checkModelAvailability(apiConfiguration?.lmStudioModelId)
+	}, [apiConfiguration?.lmStudioModelId, checkModelAvailability])
 
 	// Check if the draft model exists
 	const draftModelNotAvailable = useMemo(() => {
-		const draftModel = apiConfiguration?.lmStudioDraftModelId
-		if (!draftModel) return false
-
-		// Check if model exists in local LM Studio models
-		if (lmStudioModels.length > 0 && lmStudioModels.includes(draftModel)) {
-			return false // Model is available locally
-		}
-
-		// If we have router models data for LM Studio
-		if (routerModels.data?.lmstudio) {
-			const availableModels = Object.keys(routerModels.data.lmstudio)
-			// Show warning if model is not in the list (regardless of how many models there are)
-			return !availableModels.includes(draftModel)
-		}
-
-		// If neither source has loaded yet, don't show warning
-		return false
-	}, [apiConfiguration?.lmStudioDraftModelId, routerModels.data, lmStudioModels])
+		return checkModelAvailability(apiConfiguration?.lmStudioDraftModelId)
+	}, [apiConfiguration?.lmStudioDraftModelId, checkModelAvailability])
 
 	return (
 		<>
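The extracted `checkModelAvailability` keeps both memos consistent. The same pattern in isolation, as a simplified non-JSX sketch (hook name and signature are illustrative):

```typescript
// Simplified, non-JSX sketch of the memoization pattern used above (illustrative hook).
import { useCallback, useMemo } from "react"

function useModelWarning(modelId: string | undefined, localModels: string[], routerModels?: Record<string, unknown>) {
	const checkModelAvailability = useCallback(
		(id: string | undefined): boolean => {
			if (!id) return false
			if (localModels.includes(id)) return false // available locally
			if (routerModels) return !(id in routerModels) // warn if unknown to the router
			return false // neither source has loaded yet
		},
		[localModels, routerModels],
	)

	// Recomputes only when the model ID or the checker itself changes.
	return useMemo(() => checkModelAvailability(modelId), [modelId, checkModelAvailability])
}
```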
diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json
index 57b5dadaae..9aa6c4f570 100644
--- a/webview-ui/src/i18n/locales/ca/settings.json
+++ b/webview-ui/src/i18n/locales/ca/settings.json
@@ -119,7 +119,10 @@
 		"searchMinScoreResetTooltip": "Restablir al valor per defecte (0.4)",
 		"searchMaxResultsLabel": "Màxim de resultats de cerca",
 		"searchMaxResultsDescription": "Nombre màxim de resultats de cerca a retornar quan es consulta l'índex de la base de codi. Els valors més alts proporcionen més context però poden incloure resultats menys rellevants.",
-		"resetToDefault": "Restablir al valor per defecte"
+		"resetToDefault": "Restablir al valor per defecte",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Permet que Roo realitzi operacions automàticament sense requerir aprovació. Activeu aquesta configuració només si confieu plenament en la IA i enteneu els riscos de seguretat associats.",
diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json
index 8d57652ba2..c8dd97d5a3 100644
--- a/webview-ui/src/i18n/locales/de/settings.json
+++ b/webview-ui/src/i18n/locales/de/settings.json
@@ -119,7 +119,10 @@
 		"searchMinScoreResetTooltip": "Auf Standardwert zurücksetzen (0.4)",
 		"searchMaxResultsLabel": "Maximale Suchergebnisse",
 		"searchMaxResultsDescription": "Maximale Anzahl von Suchergebnissen, die bei der Abfrage des Codebase-Index zurückgegeben werden. Höhere Werte bieten mehr Kontext, können aber weniger relevante Ergebnisse enthalten.",
-		"resetToDefault": "Auf Standard zurücksetzen"
+		"resetToDefault": "Auf Standard zurücksetzen",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Erlaubt Roo, Operationen automatisch ohne Genehmigung durchzuführen. Aktiviere diese Einstellungen nur, wenn du der KI vollständig vertraust und die damit verbundenen Sicherheitsrisiken verstehst.",
diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json
index da40058b00..7d26187be0 100644
--- a/webview-ui/src/i18n/locales/en/settings.json
+++ b/webview-ui/src/i18n/locales/en/settings.json
@@ -50,9 +50,10 @@
 		"openaiProvider": "OpenAI",
 		"ollamaProvider": "Ollama",
 		"geminiProvider": "Gemini",
+		"openaiCompatibleProvider": "OpenAI Compatible",
+		"lmstudioProvider": "LM Studio",
 		"geminiApiKeyLabel": "API Key:",
 		"geminiApiKeyPlaceholder": "Enter your Gemini API key",
-		"openaiCompatibleProvider": "OpenAI Compatible",
 		"openAiKeyLabel": "OpenAI API Key",
 		"openAiKeyPlaceholder": "Enter your OpenAI API key",
 		"openAiCompatibleBaseUrlLabel": "Base URL",
@@ -62,6 +63,7 @@
 		"modelDimensionLabel": "Model Dimension",
 		"openAiCompatibleModelDimensionPlaceholder": "e.g., 1536",
 		"openAiCompatibleModelDimensionDescription": "The embedding dimension (output size) for your model. Check your provider's documentation for this value. Common values: 384, 768, 1536, 3072.",
+		"lmstudioUrlLabel": "LM Studio URL:",
 		"modelLabel": "Model",
 		"modelPlaceholder": "Enter model name",
 		"selectModel": "Select a model",
@@ -91,6 +93,7 @@
 		},
 		"ollamaUrlPlaceholder": "http://localhost:11434",
 		"openAiCompatibleBaseUrlPlaceholder": "https://api.example.com",
+		"lmstudioUrlPlaceholder": "http://localhost:1234",
 		"modelDimensionPlaceholder": "1536",
 		"qdrantUrlPlaceholder": "http://localhost:6333",
 		"saveError": "Failed to save settings",
diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json
index b91d0e055f..4ec3047fb3 100644
--- a/webview-ui/src/i18n/locales/es/settings.json
+++ b/webview-ui/src/i18n/locales/es/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "Resultados máximos de búsqueda",
 		"searchMaxResultsDescription": "Número máximo de resultados de búsqueda a devolver al consultar el índice de código. Valores más altos proporcionan más contexto pero pueden incluir resultados menos relevantes.",
-		"resetToDefault": "Restablecer al valor predeterminado"
+		"resetToDefault": "Restablecer al valor predeterminado",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Permitir que Roo realice operaciones automáticamente sin requerir aprobación. Habilite esta configuración solo si confía plenamente en la IA y comprende los riesgos de seguridad asociados.",
diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json
index d4940567c6..c29cbcd521 100644
--- a/webview-ui/src/i18n/locales/fr/settings.json
+++ b/webview-ui/src/i18n/locales/fr/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "Résultats de recherche maximum",
 		"searchMaxResultsDescription": "Nombre maximum de résultats de recherche à retourner lors de l'interrogation de l'index de code. Des valeurs plus élevées fournissent plus de contexte mais peuvent inclure des résultats moins pertinents.",
-		"resetToDefault": "Réinitialiser par défaut"
+		"resetToDefault": "Réinitialiser par défaut",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Permettre à Roo d'effectuer automatiquement des opérations sans requérir d'approbation. Activez ces paramètres uniquement si vous faites entièrement confiance à l'IA et que vous comprenez les risques de sécurité associés.",
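
The settings keys added above (canonically in en/settings.json) slot in beside the existing ollama and openAiCompatible entries, so the webview can read them through the usual translation layer. A hypothetical consumption sketch — the react-i18next hook and the "settings:codeIndex." key prefix are assumptions inferred from the per-namespace locale layout, not taken from this diff:

```tsx
// Hypothetical field component; hook and key prefix are assumed, not from this PR.
import { useTranslation } from "react-i18next"

export function LmStudioUrlField({ value, onChange }: { value: string; onChange: (v: string) => void }) {
	const { t } = useTranslation()
	return (
		<label>
			{t("settings:codeIndex.lmstudioUrlLabel")}
			<input
				value={value}
				placeholder={t("settings:codeIndex.lmstudioUrlPlaceholder")} // "http://localhost:1234"
				onChange={(e) => onChange(e.target.value)}
			/>
		</label>
	)
}
```
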
diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json
index 8665afc990..6e3143ce33 100644
--- a/webview-ui/src/i18n/locales/hi/settings.json
+++ b/webview-ui/src/i18n/locales/hi/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "अधिकतम खोज परिणाम",
 		"searchMaxResultsDescription": "कोडबेस इंडेक्स को क्वेरी करते समय वापस करने के लिए खोज परिणामों की अधिकतम संख्या। उच्च मान अधिक संदर्भ प्रदान करते हैं लेकिन कम प्रासंगिक परिणाम शामिल कर सकते हैं।",
-		"resetToDefault": "डिफ़ॉल्ट पर रीसेट करें"
+		"resetToDefault": "डिफ़ॉल्ट पर रीसेट करें",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Roo को अनुमोदन की आवश्यकता के बिना स्वचालित रूप से ऑपरेशन करने की अनुमति दें। इन सेटिंग्स को केवल तभी सक्षम करें जब आप AI पर पूरी तरह से भरोसा करते हों और संबंधित सुरक्षा जोखिमों को समझते हों।",
diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json
index 199751e384..6c0c60290f 100644
--- a/webview-ui/src/i18n/locales/id/settings.json
+++ b/webview-ui/src/i18n/locales/id/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "Hasil Pencarian Maksimum",
 		"searchMaxResultsDescription": "Jumlah maksimum hasil pencarian yang dikembalikan saat melakukan query indeks basis kode. Nilai yang lebih tinggi memberikan lebih banyak konteks tetapi mungkin menyertakan hasil yang kurang relevan.",
-		"resetToDefault": "Reset ke default"
+		"resetToDefault": "Reset ke default",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Izinkan Roo untuk secara otomatis melakukan operasi tanpa memerlukan persetujuan. Aktifkan pengaturan ini hanya jika kamu sepenuhnya mempercayai AI dan memahami risiko keamanan yang terkait.",
diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json
index 48a4c8e4db..8d5e2f5853 100644
--- a/webview-ui/src/i18n/locales/it/settings.json
+++ b/webview-ui/src/i18n/locales/it/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "Risultati di ricerca massimi",
 		"searchMaxResultsDescription": "Numero massimo di risultati di ricerca da restituire quando si interroga l'indice del codice. Valori più alti forniscono più contesto ma possono includere risultati meno pertinenti.",
-		"resetToDefault": "Ripristina al valore predefinito"
+		"resetToDefault": "Ripristina al valore predefinito",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Permetti a Roo di eseguire automaticamente operazioni senza richiedere approvazione. Abilita queste impostazioni solo se ti fidi completamente dell'IA e comprendi i rischi di sicurezza associati.",
diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json
index 397ce67f62..f46fd1be4e 100644
--- a/webview-ui/src/i18n/locales/ja/settings.json
+++ b/webview-ui/src/i18n/locales/ja/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "最大検索結果数",
 		"searchMaxResultsDescription": "コードベースインデックスをクエリする際に返される検索結果の最大数。値を高くするとより多くのコンテキストが提供されますが、関連性の低い結果が含まれる可能性があります。",
-		"resetToDefault": "デフォルトにリセット"
+		"resetToDefault": "デフォルトにリセット",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Rooが承認なしで自動的に操作を実行できるようにします。AIを完全に信頼し、関連するセキュリティリスクを理解している場合にのみ、これらの設定を有効にしてください。",
diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json
index 746cea65ad..65ddb07c14 100644
--- a/webview-ui/src/i18n/locales/ko/settings.json
+++ b/webview-ui/src/i18n/locales/ko/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "최대 검색 결과",
 		"searchMaxResultsDescription": "코드베이스 인덱스를 쿼리할 때 반환할 최대 검색 결과 수입니다. 값이 높을수록 더 많은 컨텍스트를 제공하지만 관련성이 낮은 결과가 포함될 수 있습니다.",
-		"resetToDefault": "기본값으로 재설정"
+		"resetToDefault": "기본값으로 재설정",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Roo가 승인 없이 자동으로 작업을 수행할 수 있도록 허용합니다. AI를 완전히 신뢰하고 관련 보안 위험을 이해하는 경우에만 이러한 설정을 활성화하세요.",
diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json
index c5315205ca..8df4e5c19b 100644
--- a/webview-ui/src/i18n/locales/nl/settings.json
+++ b/webview-ui/src/i18n/locales/nl/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "Maximum Zoekresultaten",
 		"searchMaxResultsDescription": "Maximum aantal zoekresultaten dat wordt geretourneerd bij het doorzoeken van de codebase-index. Hogere waarden bieden meer context maar kunnen minder relevante resultaten bevatten.",
-		"resetToDefault": "Reset naar standaard"
+		"resetToDefault": "Reset naar standaard",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Sta Roo toe om automatisch handelingen uit te voeren zonder goedkeuring. Schakel deze instellingen alleen in als je de AI volledig vertrouwt en de bijbehorende beveiligingsrisico's begrijpt.",
diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json
index 1829c23ba4..3fd005c801 100644
--- a/webview-ui/src/i18n/locales/pl/settings.json
+++ b/webview-ui/src/i18n/locales/pl/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "Maksymalna liczba wyników wyszukiwania",
 		"searchMaxResultsDescription": "Maksymalna liczba wyników wyszukiwania zwracanych podczas zapytania do indeksu bazy kodu. Wyższe wartości zapewniają więcej kontekstu, ale mogą zawierać mniej istotne wyniki.",
-		"resetToDefault": "Przywróć domyślne"
+		"resetToDefault": "Przywróć domyślne",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Pozwól Roo na automatyczne wykonywanie operacji bez wymagania zatwierdzenia. Włącz te ustawienia tylko jeśli w pełni ufasz AI i rozumiesz związane z tym zagrożenia bezpieczeństwa.",
diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json
index 6e46cc8c3e..fd3b283813 100644
--- a/webview-ui/src/i18n/locales/pt-BR/settings.json
+++ b/webview-ui/src/i18n/locales/pt-BR/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "Resultados máximos de busca",
 		"searchMaxResultsDescription": "Número máximo de resultados de busca a retornar ao consultar o índice de código. Valores mais altos fornecem mais contexto, mas podem incluir resultados menos relevantes.",
-		"resetToDefault": "Redefinir para o padrão"
+		"resetToDefault": "Redefinir para o padrão",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Permitir que o Roo realize operações automaticamente sem exigir aprovação. Ative essas configurações apenas se confiar totalmente na IA e compreender os riscos de segurança associados.",
diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json
index e0a897c2e5..e693088624 100644
--- a/webview-ui/src/i18n/locales/ru/settings.json
+++ b/webview-ui/src/i18n/locales/ru/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "Максимальное количество результатов поиска",
 		"searchMaxResultsDescription": "Максимальное количество результатов поиска, возвращаемых при запросе индекса кодовой базы. Более высокие значения предоставляют больше контекста, но могут включать менее релевантные результаты.",
-		"resetToDefault": "Сбросить к значению по умолчанию"
+		"resetToDefault": "Сбросить к значению по умолчанию",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Разрешить Roo автоматически выполнять операции без необходимости одобрения. Включайте эти параметры только если полностью доверяете ИИ и понимаете связанные с этим риски безопасности.",
diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json
index 486991ec0d..7b7afa8fac 100644
--- a/webview-ui/src/i18n/locales/tr/settings.json
+++ b/webview-ui/src/i18n/locales/tr/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "Maksimum Arama Sonuçları",
 		"searchMaxResultsDescription": "Kod tabanı dizinini sorgularken döndürülecek maksimum arama sonucu sayısı. Daha yüksek değerler daha fazla bağlam sağlar ancak daha az alakalı sonuçlar içerebilir.",
-		"resetToDefault": "Varsayılana sıfırla"
+		"resetToDefault": "Varsayılana sıfırla",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Roo'nun onay gerektirmeden otomatik olarak işlemler gerçekleştirmesine izin verin. Bu ayarları yalnızca yapay zekaya tamamen güveniyorsanız ve ilgili güvenlik risklerini anlıyorsanız etkinleştirin.",
diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json
index e31355b403..fe111c2d5b 100644
--- a/webview-ui/src/i18n/locales/vi/settings.json
+++ b/webview-ui/src/i18n/locales/vi/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "Số Kết Quả Tìm Kiếm Tối Đa",
 		"searchMaxResultsDescription": "Số lượng kết quả tìm kiếm tối đa được trả về khi truy vấn chỉ mục cơ sở mã. Giá trị cao hơn cung cấp nhiều ngữ cảnh hơn nhưng có thể bao gồm các kết quả ít liên quan hơn.",
-		"resetToDefault": "Đặt lại về mặc định"
+		"resetToDefault": "Đặt lại về mặc định",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "Cho phép Roo tự động thực hiện các hoạt động mà không cần phê duyệt. Chỉ bật những cài đặt này nếu bạn hoàn toàn tin tưởng AI và hiểu rõ các rủi ro bảo mật liên quan.",
diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json
index 4b46b9af0a..c8e941e978 100644
--- a/webview-ui/src/i18n/locales/zh-CN/settings.json
+++ b/webview-ui/src/i18n/locales/zh-CN/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "最大搜索结果数",
 		"searchMaxResultsDescription": "查询代码库索引时返回的最大搜索结果数。较高的值提供更多上下文，但可能包含相关性较低的结果。",
-		"resetToDefault": "恢复默认值"
+		"resetToDefault": "恢复默认值",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "允许 Roo 自动执行操作而无需批准。只有在您完全信任 AI 并了解相关安全风险的情况下才启用这些设置。",
diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json
index 3e35097b1e..723b984582 100644
--- a/webview-ui/src/i18n/locales/zh-TW/settings.json
+++ b/webview-ui/src/i18n/locales/zh-TW/settings.json
@@ -120,6 +120,9 @@
 		"searchMaxResultsLabel": "最大搜尋結果數",
 		"searchMaxResultsDescription": "查詢程式碼庫索引時傳回的最大搜尋結果數。較高的值提供更多上下文，但可能包含相關性較低的結果。",
-		"resetToDefault": "重設為預設值"
+		"resetToDefault": "重設為預設值",
+		"lmstudioProvider": "LM Studio",
+		"lmstudioUrlLabel": "LM Studio URL:",
+		"lmstudioUrlPlaceholder": "http://localhost:1234"
 	},
 	"autoApprove": {
 		"description": "允許 Roo 無需核准即執行操作。僅在您完全信任 AI 並了解相關安全風險時啟用這些設定。",
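
Finally, since every locale's placeholder points at http://localhost:1234, the dimension values hard-coded in EMBEDDING_MODEL_PROFILES.lmstudio can be sanity-checked against a live server. LM Studio exposes an OpenAI-compatible REST API, so a probe of its embeddings endpoint should return vectors of the advertised length. A sketch, assuming the local server is enabled and the model is loaded (not part of this change):

```typescript
// Sanity check: request one embedding from a running LM Studio server and
// compare its length against the dimension recorded in the profiles above.
const BASE_URL = "http://localhost:1234"
const MODEL_ID = "text-embedding-nomic-embed-text-v1.5@f16"
const EXPECTED_DIMENSION = 768 // from EMBEDDING_MODEL_PROFILES.lmstudio

async function main() {
	const res = await fetch(`${BASE_URL}/v1/embeddings`, {
		method: "POST",
		headers: { "Content-Type": "application/json" },
		body: JSON.stringify({ model: MODEL_ID, input: "dimension probe" }),
	})
	if (!res.ok) throw new Error(`LM Studio returned ${res.status}`)
	const json = (await res.json()) as { data: { embedding: number[] }[] }
	const actual = json.data[0].embedding.length
	console.log(actual === EXPECTED_DIMENSION ? "dimension matches" : `mismatch: got ${actual}`)
}

main().catch(console.error)
```
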