diff --git a/lua/gp/config.lua b/lua/gp/config.lua
index 43cf729..b94953c 100644
--- a/lua/gp/config.lua
+++ b/lua/gp/config.lua
@@ -49,7 +49,7 @@ local config = {
 		},
 		ollama = {
 			disable = true,
-			endpoint = "http://localhost:11434/v1/chat/completions",
+			endpoint = "http://localhost:11434/api/chat",
 			secret = "dummy_secret",
 		},
 		lmstudio = {
@@ -196,6 +196,19 @@ local config = {
 			-- system prompt (use this to specify the persona/role of the AI)
 			system_prompt = "You are a general AI assistant.",
 		},
+		{
+			provider = "ollama",
+			name = "ChatQwen3-8B",
+			chat = true,
+			command = false,
+			-- string with model name or table with model name and parameters
+			model = {
+				model = "qwen3:8b",
+				think = false, -- toggle thinking mode for Ollama's thinking models
+			},
+			-- system prompt (use this to specify the persona/role of the AI)
+			system_prompt = "You are a general AI assistant.",
+		},
 		{
 			provider = "lmstudio",
 			name = "ChatLMStudio",
diff --git a/lua/gp/dispatcher.lua b/lua/gp/dispatcher.lua
index e977d1d..960ed31 100644
--- a/lua/gp/dispatcher.lua
+++ b/lua/gp/dispatcher.lua
@@ -161,6 +161,41 @@ D.prepare_payload = function(messages, model, provider)
 		return payload
 	end
 
+	if provider == "ollama" then
+		local payload = {
+			model = model.model,
+			stream = true,
+			messages = messages,
+		}
+
+		if model.think ~= nil then
+			payload.think = model.think
+		end
+
+		local options = {}
+		if model.temperature then
+			options.temperature = math.max(0, math.min(2, model.temperature))
+		end
+		if model.top_p then
+			options.top_p = math.max(0, math.min(1, model.top_p))
+		end
+		if model.min_p then
+			options.min_p = math.max(0, math.min(1, model.min_p))
+		end
+		if model.num_ctx then
+			options.num_ctx = model.num_ctx
+		end
+		if model.top_k then
+			options.top_k = model.top_k
+		end
+
+		if next(options) then
+			payload.options = options
+		end
+
+		return payload
+	end
+
 	local output = {
 		model = model.model,
 		stream = true,
@@ -270,6 +305,15 @@ local query = function(buf, provider, payload, handler, on_exit, callback)
 				end
 			end
 
+			if qt.provider == "ollama" then
+				if line:match('"message":') and line:match('"content":') then
+					local success, decoded = pcall(vim.json.decode, line)
+					if success and decoded.message and decoded.message.content then
+						content = decoded.message.content
+					end
+				end
+			end
+
 			if content and type(content) == "string" then
 				qt.response = qt.response .. content
 				handler(qt, content)
@@ -391,6 +435,8 @@ local query = function(buf, provider, payload, handler, on_exit, callback)
 			"api-key: " .. bearer,
 		}
 		endpoint = render.template_replace(endpoint, "{{model}}", payload.model)
+	elseif provider == "ollama" then
+		headers = {}
 	else -- default to openai compatible headers
 		headers = {
 			"-H",
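
Reviewer note (not part of the patch): switching the endpoint from `/v1/chat/completions` to Ollama's native `/api/chat` changes the wire format, which is what the two new dispatcher branches handle. Below is a minimal sketch of both sides of that exchange, assuming a locally running Ollama server and the `ChatQwen3-8B` agent added above; the user message and the abbreviated response chunks are illustrative, not taken from the patch.

```lua
-- Shape of the request the new prepare_payload() branch builds.
-- (model/think/system_prompt come from the example agent; the options
-- table is only attached when at least one sampling parameter such as
-- temperature or top_p is set on the model table.)
local payload = {
	model = "qwen3:8b",
	stream = true,
	think = false, -- forwarded verbatim so thinking models can be toggled
	messages = {
		{ role = "system", content = "You are a general AI assistant." },
		{ role = "user", content = "Hello" }, -- hypothetical user prompt
	},
}

-- /api/chat streams newline-delimited JSON objects rather than
-- OpenAI-style "data: ..." SSE chunks, roughly:
--   {"model":"qwen3:8b","message":{"role":"assistant","content":"Hi"},"done":false}
--   {"model":"qwen3:8b","message":{"role":"assistant","content":""},"done":true}
-- which is why the query() hunk matches on '"message":'/'"content":'
-- instead of '"delta":'. Extracting one chunk (runs inside Neovim):
local line = '{"message":{"role":"assistant","content":"Hi"},"done":false}'
local ok, decoded = pcall(vim.json.decode, line)
if ok and decoded.message and decoded.message.content then
	print(decoded.message.content) --> Hi
end
```

The empty `headers = {}` in the curl setup also looks intentional here: a local Ollama endpoint needs no Authorization token, unlike the OpenAI-compatible default branch, which sets a Bearer header.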