
Commit bf7aba1

feat: Support Local LLM service (#1212)
1 parent 03e2431 commit bf7aba1

File tree: 12 files changed, +574 −81 lines

idea-plugin/src/main/kotlin/com/itangcent/ai/AIProvider.kt

Lines changed: 8 additions & 1 deletion
@@ -27,14 +27,21 @@ enum class AIProvider(val displayName: String, val models: List<AIModel>) {
             AIModel("deepseek-chat", "DeepSeek-V3"),
             AIModel("deepseek-reasoner", "DeepSeek-R1")
         )
+    ),
+
+    /**
+     * Local LLM service
+     */
+    LOCALLM(
+        "LocalLLM", emptyList()
     );
 
     companion object {
         /**
          * Get AIProvider by its display name (case-insensitive)
          */
         fun fromDisplayName(name: String?): AIProvider? {
-            return values().find { it.displayName.equals(name, ignoreCase = true) }
+            return entries.find { it.displayName.equals(name, ignoreCase = true) }
         }
 
         /**
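
For illustration only (not part of the commit), a minimal sketch of how the new LOCALLM entry resolves through the existing case-insensitive lookup; AIProvider, fromDisplayName, and the entry's empty model list are all taken from the hunk above.

import com.itangcent.ai.AIProvider

fun main() {
    // Lookup is case-insensitive, so any casing of "LocalLLM" resolves to LOCALLM
    val provider = AIProvider.fromDisplayName("locallm")
    check(provider == AIProvider.LOCALLM)

    // Unlike the hosted providers, LOCALLM ships with no predefined models;
    // models are read from the running server instead (see LocalLLMClient below).
    check(provider!!.models.isEmpty())
}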

idea-plugin/src/main/kotlin/com/itangcent/ai/AIService.kt

Lines changed: 1 addition & 0 deletions
@@ -9,6 +9,7 @@ import com.itangcent.spi.SpiSingleBeanProvider
  */
 @ProvidedBy(AIServiceProvider::class)
 interface AIService {
+
     /**
      * Sends a prompt to the AI service and returns the response
      * @param prompt The user prompt to send to the AI service
Lines changed: 47 additions & 0 deletions
@@ -0,0 +1,47 @@
+package com.itangcent.ai
+
+import com.itangcent.common.logger.Log
+import com.itangcent.common.logger.traceError
+
+/**
+ * A utility class for checking the health and availability of AI services.
+ * This class provides methods to verify if AI services are operational and can handle requests.
+ */
+object AIServiceHealthChecker : Log() {
+    /**
+     * Checks if the AI service is available and can handle requests.
+     * For regular AI services, it verifies by sending a simple test prompt.
+     * For Local LLM clients, it checks if there are any available models.
+     *
+     * @return true if the service is available and can handle requests, false otherwise
+     */
+    fun AIService.isAvailable(): Boolean {
+        if (this is LocalLLMClient) {
+            return this.hasAvailableModels()
+        }
+        return try {
+            val response =
+                sendPrompt(systemMessage = "Answer Question", userPrompt = "Please respond with exactly 'YES'")
+            response.contains("YES", ignoreCase = true)
+        } catch (e: Exception) {
+            LOG.traceError("Failed to check AI service", e)
+            false
+        }
+    }
+
+    /**
+     * Checks if the Local LLM client has any available models.
+     * This is used to verify if the local LLM service is properly configured and ready to use.
+     *
+     * @return true if there are available models, false otherwise
+     */
+    fun LocalLLMClient.hasAvailableModels(): Boolean {
+        try {
+            val availableModels = this.getAvailableModels()
+            return availableModels.isNotEmpty()
+        } catch (e: Exception) {
+            LOG.traceError("Failed to check AI service", e)
+            return false
+        }
+    }
+}
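
A hedged usage sketch, not part of the commit: the extension functions above let callers probe a service before using it. The checkBeforeUse helper below is hypothetical; AIService, LocalLLMClient, and the imported extension come from this diff.

import com.itangcent.ai.AIService
import com.itangcent.ai.AIServiceHealthChecker.isAvailable

// Hypothetical helper: returns the service only if the health check passes.
// For a LocalLLMClient this resolves to a GET /models probe; for any other
// AIService it sends the tiny "Please respond with exactly 'YES'" prompt.
fun checkBeforeUse(service: AIService): AIService? =
    if (service.isAvailable()) service else null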
Lines changed: 101 additions & 0 deletions
@@ -0,0 +1,101 @@
+package com.itangcent.ai
+
+import com.itangcent.common.utils.GsonUtils
+import com.itangcent.http.HttpClient
+import com.itangcent.http.RawContentType
+import com.itangcent.http.contentType
+import com.itangcent.idea.plugin.utils.AIUtils
+import com.itangcent.intellij.extend.sub
+
+/**
+ * Client implementation for interacting with a local LLM server.
+ * This class handles the direct communication with the LLM server API.
+ */
+class LocalLLMClient(
+    private val serverUrl: String,
+    private val modelName: String,
+    private val httpClient: HttpClient
+) : AIService {
+
+    companion object {
+        private const val CHAT_COMPLETIONS_ENDPOINT = "/chat/completions"
+        private const val MODELS_ENDPOINT = "/models"
+    }
+
+    /**
+     * Sends a prompt to the local LLM service with a custom system message.
+     *
+     * @param systemMessage The system message that sets the context for the LLM
+     * @param userPrompt The user's input prompt to be processed
+     * @return The LLM's response as a string
+     * @throws AIConfigurationException if the local LLM server URL is not configured
+     * @throws AIApiException if there's an error in the API response or communication
+     */
+    override fun sendPrompt(systemMessage: String, userPrompt: String): String {
+        val fullUrl = "$serverUrl$CHAT_COMPLETIONS_ENDPOINT"
+        try {
+            val requestBodyMap = mapOf(
+                "messages" to listOf(
+                    mapOf("role" to "system", "content" to systemMessage),
+                    mapOf("role" to "user", "content" to userPrompt)
+                ),
+                "model" to modelName,
+                "stream" to false
+            )
+
+            val requestBody = GsonUtils.toJson(requestBodyMap)
+            val httpRequest = httpClient.post(fullUrl)
+                .contentType(RawContentType.APPLICATION_JSON)
+                .body(requestBody)
+
+            val httpResponse = httpRequest.call()
+
+            if (httpResponse.code() != 200) {
+                val errorMessage =
+                    "Local LLM server returned status code ${httpResponse.code()}: ${httpResponse.string()}"
+                throw AIApiException(errorMessage)
+            }
+
+            val responseBody = httpResponse.string() ?: throw AIApiException("Empty response from Local LLM server")
+            val jsonElement = GsonUtils.parseToJsonTree(responseBody)
+            val content = jsonElement.sub("choices")?.asJsonArray?.firstOrNull()
+                ?.asJsonObject?.sub("message")?.sub("content")?.asString
+            val errMsg = jsonElement.sub("error")?.asString
+            return content?.let { AIUtils.cleanMarkdownCodeBlocks(it) }
+                ?: throw AIApiException(errMsg ?: "Could not parse response from Local LLM server")
+        } catch (e: AIException) {
+            throw e
+        } catch (e: Exception) {
+            throw AIApiException("Error calling Local LLM server: ${e.message}", e)
+        }
+    }
+
+    /**
+     * Retrieves the list of available models from the local LLM server.
+     *
+     * @return List of model IDs available on the server
+     * @throws AIApiException if there's an error in the API response or communication
+     */
+    fun getAvailableModels(): List<String> {
+        val url = "$serverUrl$MODELS_ENDPOINT"
+
+        try {
+            val response = httpClient.get(url).call()
+
+            if (response.code() != 200) {
+                throw AIApiException("Failed to get models: ${response.code()}")
+            }
+
+            val responseBody = response.string() ?: throw AIApiException("Empty response from server")
+            val jsonElement = GsonUtils.parseToJsonTree(responseBody)
+            val dataArray = jsonElement.sub("data")?.asJsonArray
+                ?: throw AIApiException("Invalid response format: missing 'data' array")
+
+            return dataArray.mapNotNull { modelObj ->
+                modelObj.asJsonObject.sub("id")?.asString
+            }
+        } catch (e: Exception) {
+            throw AIApiException("Error getting models: ${e.message}", e)
+        }
+    }
+}
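
A usage sketch under stated assumptions, not commit code: the httpClient parameter stands in for a com.itangcent.http.HttpClient obtained from the plugin (e.g. via HttpClientProvider.getHttpClient()), and the server URL and model id are illustrative values for an OpenAI-compatible local server such as Ollama or LM Studio.

import com.itangcent.ai.LocalLLMClient
import com.itangcent.http.HttpClient

// Hypothetical helper showing the call flow against a local server.
fun pingLocalServer(httpClient: HttpClient): String {
    val client = LocalLLMClient(
        serverUrl = "http://localhost:11434/v1",   // illustrative OpenAI-compatible API root
        modelName = "qwen2.5:7b",                  // illustrative model id
        httpClient = httpClient                    // assumed to come from the plugin
    )
    // GET {serverUrl}/models -> ids collected from the "data" array
    val models = client.getAvailableModels()
    // POST {serverUrl}/chat/completions, non-streaming; markdown code fences
    // in the reply are stripped by AIUtils.cleanMarkdownCodeBlocks
    return client.sendPrompt(
        systemMessage = "You are a concise assistant.",
        userPrompt = "Models on this server: $models. Reply with the word PONG."
    )
}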
Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
+package com.itangcent.ai
+
+import com.itangcent.ai.AIServiceHealthChecker.isAvailable
+import com.itangcent.common.logger.Log
+import com.itangcent.http.HttpClient
+
+/**
+ * Utility class for discovering and validating LocalLLM servers.
+ * This class attempts to find a working LocalLLM server by trying various common API endpoint suffixes.
+ *
+ * @property httpClient The HTTP client used for making requests
+ * @property possibleSuffixes List of possible API endpoint suffixes to try
+ */
+class LocalLLMServerDiscoverer(
+    private val httpClient: HttpClient,
+    private val possibleSuffixes: List<String> = DEFAULT_SUFFIXES
+) {
+    companion object : Log() {
+        private val DEFAULT_SUFFIXES = listOf(
+            "/v1",
+            "/api/v1",
+            "/api",
+            "/v1/api"
+        )
+    }
+
+    /**
+     * Attempts to discover a working LocalLLM server URL from a base URL.
+     * The method will try the base URL first, then attempt various API endpoint suffixes.
+     *
+     * @param baseUrl The base URL to start searching from (e.g., "http://localhost:8000")
+     * @return The working server URL if found, null otherwise
+     */
+    fun discoverServer(baseUrl: String): String? {
+        val trimmedUrl = baseUrl.trimEnd('/')
+        if (validateLocalLLMServer(trimmedUrl)) {
+            LOG.debug("Found working server at base URL: $trimmedUrl")
+            return trimmedUrl
+        }
+
+        // Try all possible suffixes
+        for (suffix in possibleSuffixes) {
+            if (baseUrl.endsWith(suffix)) {
+                LOG.debug("Skipping suffix $suffix as it's already in the base URL")
+                continue
+            }
+            val serverUrl = if (suffix.isEmpty()) trimmedUrl else "$trimmedUrl$suffix"
+            if (validateLocalLLMServer(serverUrl)) {
+                LOG.debug("Found working server at URL: $serverUrl")
+                return serverUrl
+            }
+        }
+
+        LOG.warn("No working LocalLLM server found for base URL: $baseUrl")
+        return null
+    }
+
+    /**
+     * Validates if a given URL points to a working LocalLLM server.
+     * A server is considered working if it responds to health checks and supports the required API endpoints.
+     *
+     * @param serverUrl The URL to validate
+     * @return true if the server is working, false otherwise
+     */
+    private fun validateLocalLLMServer(serverUrl: String): Boolean {
+        try {
+            val localLLMService = LocalLLMClient(
+                serverUrl = serverUrl,
+                modelName = "",
+                httpClient = httpClient
+            )
+            return localLLMService.isAvailable()
+        } catch (e: Exception) {
+            LOG.debug("Server validation failed for $serverUrl: ${e.message}")
+            return false
+        }
+    }
+}
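
A sketch of the intended discovery flow, not commit code: starting from a bare host URL, the discoverer probes the base URL and then each default suffix until one passes the LocalLLMClient availability check. The resolveApiRoot helper and the localhost URL are illustrative; httpClient is again assumed to come from the plugin.

import com.itangcent.ai.LocalLLMServerDiscoverer
import com.itangcent.http.HttpClient

// Hypothetical helper: probes "http://localhost:8000", then ".../v1", ".../api/v1",
// ".../api", ".../v1/api", returning the first URL whose GET /models reports any model.
fun resolveApiRoot(httpClient: HttpClient): String? {
    val discoverer = LocalLLMServerDiscoverer(httpClient)
    return discoverer.discoverServer("http://localhost:8000")
}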
Lines changed: 62 additions & 0 deletions
@@ -0,0 +1,62 @@
+package com.itangcent.ai
+
+import com.google.inject.Inject
+import com.google.inject.Singleton
+import com.itangcent.idea.plugin.condition.ConditionOnSetting
+import com.itangcent.idea.plugin.settings.helper.AISettingsHelper
+import com.itangcent.suv.http.HttpClientProvider
+
+
+/**
+ * Implementation of AIService that interfaces with a local LLM server.
+ */
+@Singleton
+@ConditionOnSetting("aiProvider", havingValue = "LocalLLM")
+open class LocalLLMService : AIService {
+
+    companion object
+
+    @Inject
+    private lateinit var aiSettingsHelper: AISettingsHelper
+
+    @Inject
+    private lateinit var httpClientProvider: HttpClientProvider
+
+    private val rawLocalLLMService: LocalLLMClient by lazy {
+        LocalLLMClient(getServerUrl(), getModelName(), httpClientProvider.getHttpClient())
+    }
+
+    /**
+     * Sends a prompt to the local LLM service with a custom system message.
+     *
+     * @param systemMessage The system message that sets the context for the LLM
+     * @param userPrompt The user's input prompt to be processed
+     * @return The LLM's response as a string
+     * @throws AIConfigurationException if the local LLM server URL is not configured
+     * @throws AIApiException if there's an error in the API response or communication
+     */
+    override fun sendPrompt(systemMessage: String, userPrompt: String): String {
+        return rawLocalLLMService.sendPrompt(systemMessage, userPrompt)
+    }
+
+    /**
+     * Retrieves the configured local LLM server URL from settings.
+     *
+     * @return The configured server URL
+     * @throws AIConfigurationException if the URL is not configured
+     */
+    private fun getServerUrl(): String {
+        return aiSettingsHelper.aiLocalServerUrl
+            ?: throw AIConfigurationException("Local LLM server URL is not configured")
+    }
+
+    /**
+     * Retrieves the configured model name from settings or returns a default value.
+     *
+     * @return The configured model name or "local-model" as default
+     */
+    private fun getModelName(): String {
+        return aiSettingsHelper.aiModel ?: "local-model"
+    }
+}
+
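
To restate the wiring this class relies on, using only names visible in the diff (aiProvider, aiLocalServerUrl, aiModel): setting aiProvider to "LocalLLM" activates the @ConditionOnSetting binding, a missing server URL is a configuration error, and a missing model name falls back to "local-model". The helper below is a hypothetical stand-alone sketch of those rules, not plugin code.

import com.itangcent.ai.AIConfigurationException

// Hypothetical restatement of the settings rules encoded by LocalLLMService.
fun describeLocalLlmTarget(aiLocalServerUrl: String?, aiModel: String?): String {
    val serverUrl = aiLocalServerUrl
        ?: throw AIConfigurationException("Local LLM server URL is not configured")
    val model = aiModel ?: "local-model"   // default applied by getModelName()
    return "chat completions -> $serverUrl/chat/completions (model: $model)"
}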
