9 changes: 8 additions & 1 deletion idea-plugin/src/main/kotlin/com/itangcent/ai/AIProvider.kt
@@ -27,14 +27,21 @@ enum class AIProvider(val displayName: String, val models: List<AIModel>) {
AIModel("deepseek-chat", "DeepSeek-V3"),
AIModel("deepseek-reasoner", "DeepSeek-R1")
)
),

/**
* Local LLM service
*/
LOCALLM(
"LocalLLM", emptyList()
);

companion object {
/**
* Get AIProvider by its display name (case-insensitive)
*/
fun fromDisplayName(name: String?): AIProvider? {
-        return values().find { it.displayName.equals(name, ignoreCase = true) }
+        return entries.find { it.displayName.equals(name, ignoreCase = true) }
}

/**
1 change: 1 addition & 0 deletions idea-plugin/src/main/kotlin/com/itangcent/ai/AIService.kt
@@ -9,6 +9,7 @@ import com.itangcent.spi.SpiSingleBeanProvider
*/
@ProvidedBy(AIServiceProvider::class)
interface AIService {

/**
* Sends a prompt to the AI service and returns the response
* @param prompt The user prompt to send to the AI service
47 changes: 47 additions & 0 deletions idea-plugin/src/main/kotlin/com/itangcent/ai/AIServiceChecker.kt
@@ -0,0 +1,47 @@
package com.itangcent.ai

import com.itangcent.common.logger.Log
import com.itangcent.common.logger.traceError

/**
* A utility class for checking the health and availability of AI services.
* This class provides methods to verify if AI services are operational and can handle requests.
*/
object AIServiceHealthChecker : Log() {
/**
* Checks if the AI service is available and can handle requests.
* For regular AI services, it verifies by sending a simple test prompt.
* For Local LLM clients, it checks if there are any available models.
*
* @return true if the service is available and can handle requests, false otherwise
*/
fun AIService.isAvailable(): Boolean {
if (this is LocalLLMClient) {
return this.hasAvailableModels()
}
return try {
val response =
sendPrompt(systemMessage = "Answer Question", userPrompt = "Please respond with exactly 'YES'")
response.contains("YES", ignoreCase = true)
} catch (e: Exception) {
LOG.traceError("Failed to check AI service", e)
false
}
}

/**
* Checks if the Local LLM client has any available models.
* This is used to verify if the local LLM service is properly configured and ready to use.
*
* @return true if there are available models, false otherwise
*/
fun LocalLLMClient.hasAvailableModels(): Boolean {
try {
val availableModels = this.getAvailableModels()
return availableModels.isNotEmpty()
} catch (e: Exception) {
LOG.traceError("Failed to check AI service", e)
return false
}
}
}
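
A minimal usage sketch of the new isAvailable() extension, for review context only (the warnIfUnavailable helper and the println handling are illustrative, not part of this PR):

import com.itangcent.ai.AIService
import com.itangcent.ai.AIServiceHealthChecker.isAvailable

fun warnIfUnavailable(aiService: AIService) {
    // For LocalLLMClient this checks the models endpoint; for other providers it sends a tiny test prompt.
    if (!aiService.isAvailable()) {
        println("AI service is not reachable; verify the AI provider settings") // placeholder handling
    }
}
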
101 changes: 101 additions & 0 deletions idea-plugin/src/main/kotlin/com/itangcent/ai/LocalLLMClient.kt
@@ -0,0 +1,101 @@
package com.itangcent.ai

import com.itangcent.common.utils.GsonUtils
import com.itangcent.http.HttpClient
import com.itangcent.http.RawContentType
import com.itangcent.http.contentType
import com.itangcent.idea.plugin.utils.AIUtils
import com.itangcent.intellij.extend.sub

/**
* Client implementation for interacting with a local LLM server.
* This class handles the direct communication with the LLM server API.
*/
class LocalLLMClient(
private val serverUrl: String,
private val modelName: String,
private val httpClient: HttpClient
) : AIService {

companion object {
private const val CHAT_COMPLETIONS_ENDPOINT = "/chat/completions"
private const val MODELS_ENDPOINT = "/models"
}

/**
* Sends a prompt to the local LLM service with a custom system message.
*
* @param systemMessage The system message that sets the context for the LLM
* @param userPrompt The user's input prompt to be processed
* @return The LLM's response as a string
     * @throws AIApiException if there's an error in the API response or communication
*/
override fun sendPrompt(systemMessage: String, userPrompt: String): String {
val fullUrl = "$serverUrl$CHAT_COMPLETIONS_ENDPOINT"
try {
val requestBodyMap = mapOf(
"messages" to listOf(
mapOf("role" to "system", "content" to systemMessage),
mapOf("role" to "user", "content" to userPrompt)
),
"model" to modelName,
"stream" to false
)

val requestBody = GsonUtils.toJson(requestBodyMap)
val httpRequest = httpClient.post(fullUrl)
.contentType(RawContentType.APPLICATION_JSON)
.body(requestBody)

val httpResponse = httpRequest.call()

if (httpResponse.code() != 200) {
val errorMessage =
"Local LLM server returned status code ${httpResponse.code()}: ${httpResponse.string()}"
throw AIApiException(errorMessage)
}

val responseBody = httpResponse.string() ?: throw AIApiException("Empty response from Local LLM server")
val jsonElement = GsonUtils.parseToJsonTree(responseBody)
val content = jsonElement.sub("choices")?.asJsonArray?.firstOrNull()
?.asJsonObject?.sub("message")?.sub("content")?.asString
val errMsg = jsonElement.sub("error")?.asString
return content?.let { AIUtils.cleanMarkdownCodeBlocks(it) }
?: throw AIApiException(errMsg ?: "Could not parse response from Local LLM server")
} catch (e: AIException) {
throw e
} catch (e: Exception) {
throw AIApiException("Error calling Local LLM server: ${e.message}", e)
}
}

/**
* Retrieves the list of available models from the local LLM server.
*
* @return List of model IDs available on the server
* @throws AIApiException if there's an error in the API response or communication
*/
fun getAvailableModels(): List<String> {
val url = "$serverUrl$MODELS_ENDPOINT"

try {
val response = httpClient.get(url).call()

if (response.code() != 200) {
throw AIApiException("Failed to get models: ${response.code()}")
}

val responseBody = response.string() ?: throw AIApiException("Empty response from server")
val jsonElement = GsonUtils.parseToJsonTree(responseBody)
val dataArray = jsonElement.sub("data")?.asJsonArray
?: throw AIApiException("Invalid response format: missing 'data' array")

return dataArray.mapNotNull { modelObj ->
modelObj.asJsonObject.sub("id")?.asString
}
} catch (e: Exception) {
throw AIApiException("Error getting models: ${e.message}", e)
}
}
}
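
For review context, a rough usage sketch of the client above; the server URL and model id are placeholders, and the HttpClient is assumed to come from the plugin's HttpClientProvider:

import com.itangcent.ai.LocalLLMClient
import com.itangcent.http.HttpClient

fun demoLocalLLMClient(httpClient: HttpClient): String {
    val client = LocalLLMClient(
        serverUrl = "http://localhost:11434/v1", // placeholder; any OpenAI-compatible local server
        modelName = "llama3",                    // placeholder model id
        httpClient = httpClient
    )
    // GET {serverUrl}/models
    val models = client.getAvailableModels()
    println("Models on the local server: $models")
    // POST {serverUrl}/chat/completions with a system and a user message
    return client.sendPrompt(
        systemMessage = "You are a helpful assistant.",
        userPrompt = "Reply with a single word."
    )
}
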
78 changes: 78 additions & 0 deletions idea-plugin/src/main/kotlin/com/itangcent/ai/LocalLLMServerDiscoverer.kt
@@ -0,0 +1,78 @@
package com.itangcent.ai

import com.itangcent.ai.AIServiceHealthChecker.isAvailable
import com.itangcent.common.logger.Log
import com.itangcent.http.HttpClient

/**
* Utility class for discovering and validating LocalLLM servers.
* This class attempts to find a working LocalLLM server by trying various common API endpoint suffixes.
*
* @property httpClient The HTTP client used for making requests
* @property possibleSuffixes List of possible API endpoint suffixes to try
*/
class LocalLLMServerDiscoverer(
private val httpClient: HttpClient,
private val possibleSuffixes: List<String> = DEFAULT_SUFFIXES
) {
companion object : Log() {
private val DEFAULT_SUFFIXES = listOf(
"/v1",
"/api/v1",
"/api",
"/v1/api"
)
}

/**
* Attempts to discover a working LocalLLM server URL from a base URL.
* The method will try the base URL first, then attempt various API endpoint suffixes.
*
* @param baseUrl The base URL to start searching from (e.g., "http://localhost:8000")
* @return The working server URL if found, null otherwise
*/
fun discoverServer(baseUrl: String): String? {
val trimmedUrl = baseUrl.trimEnd('/')
if (validateLocalLLMServer(trimmedUrl)) {
LOG.debug("Found working server at base URL: $trimmedUrl")
return trimmedUrl
}

// Try all possible suffixes
for (suffix in possibleSuffixes) {
if (baseUrl.endsWith(suffix)) {
LOG.debug("Skipping suffix $suffix as it's already in the base URL")
continue
}
val serverUrl = if (suffix.isEmpty()) trimmedUrl else "$trimmedUrl$suffix"
if (validateLocalLLMServer(serverUrl)) {
LOG.debug("Found working server at URL: $serverUrl")
return serverUrl
}
}

LOG.warn("No working LocalLLM server found for base URL: $baseUrl")
return null
}

/**
* Validates if a given URL points to a working LocalLLM server.
* A server is considered working if it responds to health checks and supports the required API endpoints.
*
* @param serverUrl The URL to validate
* @return true if the server is working, false otherwise
*/
private fun validateLocalLLMServer(serverUrl: String): Boolean {
try {
val localLLMService = LocalLLMClient(
serverUrl = serverUrl,
modelName = "",
httpClient = httpClient
)
return localLLMService.isAvailable()
} catch (e: Exception) {
LOG.debug("Server validation failed for $serverUrl: ${e.message}")
return false
}
}
}
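
A short sketch of how the discoverer might be driven (the base URL is a placeholder; the HttpClient would again come from HttpClientProvider):

import com.itangcent.ai.AIConfigurationException
import com.itangcent.ai.LocalLLMServerDiscoverer
import com.itangcent.http.HttpClient

fun resolveLocalLLMServer(httpClient: HttpClient): String {
    val discoverer = LocalLLMServerDiscoverer(httpClient)
    // Tries the base URL as-is, then the /v1, /api/v1, /api and /v1/api suffixes
    return discoverer.discoverServer("http://localhost:11434")
        ?: throw AIConfigurationException("No working LocalLLM endpoint found under the base URL")
}
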
62 changes: 62 additions & 0 deletions idea-plugin/src/main/kotlin/com/itangcent/ai/LocalLLMService.kt
@@ -0,0 +1,62 @@
package com.itangcent.ai

import com.google.inject.Inject
import com.google.inject.Singleton
import com.itangcent.idea.plugin.condition.ConditionOnSetting
import com.itangcent.idea.plugin.settings.helper.AISettingsHelper
import com.itangcent.suv.http.HttpClientProvider


/**
* Implementation of AIService that interfaces with a local LLM server.
*/
@Singleton
@ConditionOnSetting("aiProvider", havingValue = "LocalLLM")
open class LocalLLMService : AIService {

companion object

@Inject
private lateinit var aiSettingsHelper: AISettingsHelper

@Inject
private lateinit var httpClientProvider: HttpClientProvider

private val rawLocalLLMService: LocalLLMClient by lazy {
LocalLLMClient(getServerUrl(), getModelName(), httpClientProvider.getHttpClient())
}

/**
* Sends a prompt to the local LLM service with a custom system message.
*
* @param systemMessage The system message that sets the context for the LLM
* @param userPrompt The user's input prompt to be processed
* @return The LLM's response as a string
* @throws AIConfigurationException if the local LLM server URL is not configured
* @throws AIApiException if there's an error in the API response or communication
*/
override fun sendPrompt(systemMessage: String, userPrompt: String): String {
return rawLocalLLMService.sendPrompt(systemMessage, userPrompt)
}

/**
* Retrieves the configured local LLM server URL from settings.
*
* @return The configured server URL
* @throws AIConfigurationException if the URL is not configured
*/
private fun getServerUrl(): String {
return aiSettingsHelper.aiLocalServerUrl
?: throw AIConfigurationException("Local LLM server URL is not configured")
}

/**
* Retrieves the configured model name from settings or returns a default value.
*
* @return The configured model name or "local-model" as default
*/
private fun getModelName(): String {
return aiSettingsHelper.aiModel ?: "local-model"
}
}
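
Finally, a hypothetical consumer-side sketch (the MethodDocEnricher class is not part of this PR; it only mirrors the injection pattern used in this file):

import com.google.inject.Inject
import com.itangcent.ai.AIService

// When the "aiProvider" setting is "LocalLLM", the injected AIService resolves to LocalLLMService.
class MethodDocEnricher {
    @Inject
    private lateinit var aiService: AIService

    fun enrich(description: String): String =
        aiService.sendPrompt(
            systemMessage = "Improve the following API description.",
            userPrompt = description
        )
}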
