
Commit ba06239

fix(adapters): openai_compatible no longer forces schema (#914)
See #918
1 parent 57ea8fe commit ba06239
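
Before this commit, every adapter built on `openai_compatible` inherited the full default schema that the third file below removes (largely Ollama-style settings mapped to `parameters.options`). With the fix, the adapter keeps only its built-in `model` entry and any extra request parameters are opt-in. A minimal sketch of an env-only setup once the fix is applied; the adapter name `my_openai` and the URL are placeholders taken from the updated docs, not required values:

```lua
-- Minimal sketch: after this fix, extending openai_compatible no longer pulls
-- in the removed default schema, so an env-only configuration is enough.
-- `my_openai` and the URL below are illustrative placeholders.
require("codecompanion").setup({
  adapters = {
    my_openai = function()
      return require("codecompanion.adapters").extend("openai_compatible", {
        env = {
          url = "https://open_compatible_ai_url", -- your endpoint
          api_key = "OpenAI_API_KEY",             -- optional: if your endpoint is authenticated
          chat_url = "/v1/chat/completions",      -- optional: default value, override if different
        },
        -- no schema block: only the adapter's remaining model setting applies
      })
    end,
  },
})
```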

File tree

3 files changed: 121 additions, 197 deletions


doc/codecompanion.txt

Lines changed: 60 additions & 1 deletion
@@ -630,7 +630,11 @@ Many adapters allow model selection via the `schema.model.default` property:
 EXAMPLE: USING OPENAI COMPATIBLE MODELS ~
 
 To use any other OpenAI compatible models, change the URL in the env table, set
-an API key:
+an API key and define the schema:
+
+Note: The schema in this instance is provided only as an example and must be modified
+according to the requirements of the model you are using.
+The options are chosen to show how to use different types.
 
 >lua
 require("codecompanion").setup({
@@ -642,6 +646,61 @@ an API key:
           api_key = "OpenAI_API_KEY", -- optional: if your endpoint is authenticated
           chat_url = "/v1/chat/completions", -- optional: default value, override if different
         },
+        schema = {
+          model = {
+            default = "deepseek-r1-671b", -- define llm model to be used
+          },
+          temperature = {
+            order = 2,
+            mapping = "parameters",
+            type = "number",
+            optional = true,
+            default = 0.8,
+            desc = "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.",
+            validate = function(n)
+              return n >= 0 and n <= 2, "Must be between 0 and 2"
+            end,
+          },
+          max_completion_tokens = {
+            order = 3,
+            mapping = "parameters",
+            type = "integer",
+            optional = true,
+            default = nil,
+            desc = "An upper bound for the number of tokens that can be generated for a completion.",
+            validate = function(n)
+              return n > 0, "Must be greater than 0"
+            end,
+          },
+          stop = {
+            order = 4,
+            mapping = "parameters",
+            type = "string",
+            optional = true,
+            default = nil,
+            desc = "Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.",
+            validate = function(s)
+              return s:len() > 0, "Cannot be an empty string"
+            end,
+          },
+          logit_bias = {
+            order = 5,
+            mapping = "parameters",
+            type = "map",
+            optional = true,
+            default = nil,
+            desc = "Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID) to an associated bias value from -100 to 100. Use https://platform.openai.com/tokenizer to find token IDs.",
+            subtype_key = {
+              type = "integer",
+            },
+            subtype = {
+              type = "integer",
+              validate = function(n)
+                return n >= -100 and n <= 100, "Must be between -100 and 100"
+              end,
+            },
+          },
+        },
       })
     end,
   },
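
The schema removed in `lua/codecompanion/adapters/openai_compatible.lua` (third file below) was largely Ollama-oriented, with most options mapped to `parameters.options`. If your OpenAI-compatible endpoint is actually Ollama and you relied on one of those defaults, the same `schema` table shown above can restore it per adapter. A hedged sketch that reuses the `num_ctx` definition exactly as it appears in the removed code; the adapter name, URL and model are placeholders:

```lua
-- Hedged sketch: re-adding one removed Ollama-style option for a single
-- adapter. The num_ctx field is copied from the schema this commit removes;
-- `my_ollama`, the URL and the model name are placeholders.
require("codecompanion").setup({
  adapters = {
    my_ollama = function()
      return require("codecompanion.adapters").extend("openai_compatible", {
        env = {
          url = "http://127.0.0.1:11434", -- the adapter's documented default
        },
        schema = {
          model = {
            default = "llama3.1", -- placeholder model name
          },
          num_ctx = {
            order = 3,
            mapping = "parameters.options", -- Ollama-style nested options
            type = "number",
            optional = true,
            default = 2048,
            desc = "The maximum number of tokens that the language model can consider at once.",
            validate = function(n)
              return n > 0, "Must be a positive number"
            end,
          },
        },
      })
    end,
  },
})
```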

doc/configuration/adapters.md

Lines changed: 61 additions & 3 deletions
@@ -1,7 +1,7 @@
 # Configuring Adapters
 
 > [!NOTE]
-> The adapters that the plugin supports out of the box can be found [here](https://github.com/olimorris/codecompanion.nvim/tree/main/lua/codecompanion/adapters). It is recommended that you review them so you better understand the settings that can be customized
+> The adapters that the plugin supports out of the box can be found [here](https://github.com/olimorris/codecompanion.nvim/tree/main/lua/codecompanion/adapters). It is recommended that you review them so you better understand the settings that can be customized
 
 An adapter is what connects Neovim to an LLM. It's the interface that allows data to be sent, received and processed and there are a multitude of ways to customize them.
 
@@ -140,18 +140,76 @@ require("codecompanion").setup({
 
 ## Example: Using OpenAI Compatible Models
 
-To use any other OpenAI compatible models, change the URL in the env table, set an API key:
+To use any other OpenAI compatible models, change the URL in the env table, set an API key and define the schema:
+
+> [!NOTE]
+> The schema in this instance is provided only as an example and must be modified according to the requirements of the model you use. The options are chosen to show how to use different types of parameters.
 
 ```lua
 require("codecompanion").setup({
   adapters = {
-    ollama = function()
+    my_openai = function()
       return require("codecompanion.adapters").extend("openai_compatible", {
        env = {
          url = "http[s]://open_compatible_ai_url", -- optional: default value is ollama url http://127.0.0.1:11434
          api_key = "OpenAI_API_KEY", -- optional: if your endpoint is authenticated
          chat_url = "/v1/chat/completions", -- optional: default value, override if different
        },
+        schema = {
+          model = {
+            default = "deepseek-r1-671b", -- define llm model to be used
+          },
+          temperature = {
+            order = 2,
+            mapping = "parameters",
+            type = "number",
+            optional = true,
+            default = 0.8,
+            desc = "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.",
+            validate = function(n)
+              return n >= 0 and n <= 2, "Must be between 0 and 2"
+            end,
+          },
+          max_completion_tokens = {
+            order = 3,
+            mapping = "parameters",
+            type = "integer",
+            optional = true,
+            default = nil,
+            desc = "An upper bound for the number of tokens that can be generated for a completion.",
+            validate = function(n)
+              return n > 0, "Must be greater than 0"
+            end,
+          },
+          stop = {
+            order = 4,
+            mapping = "parameters",
+            type = "string",
+            optional = true,
+            default = nil,
+            desc = "Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.",
+            validate = function(s)
+              return s:len() > 0, "Cannot be an empty string"
+            end,
+          },
+          logit_bias = {
+            order = 5,
+            mapping = "parameters",
+            type = "map",
+            optional = true,
+            default = nil,
+            desc = "Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID) to an associated bias value from -100 to 100. Use https://platform.openai.com/tokenizer to find token IDs.",
+            subtype_key = {
+              type = "integer",
+            },
+            subtype = {
+              type = "integer",
+              validate = function(n)
+                return n >= -100 and n <= 100, "Must be between -100 and 100"
+              end,
+            },
+          },
+        },
       })
     end,
   },
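
All of the `validate` functions in the example above follow the same contract: they return a boolean plus a message as two values. A small standalone sketch of that contract; how CodeCompanion consumes the second value is not shown in this diff, but the message is presumably only surfaced when the first value is false:

```lua
-- Standalone sketch of the validate contract used by the schema entries above:
-- each validator returns `ok, message` as two Lua return values.
local temperature_field = {
  validate = function(n)
    return n >= 0 and n <= 2, "Must be between 0 and 2"
  end,
}

print(temperature_field.validate(0.8)) --> true   Must be between 0 and 2
print(temperature_field.validate(3.5)) --> false  Must be between 0 and 2
```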

lua/codecompanion/adapters/openai_compatible.lua

Lines changed: 0 additions & 193 deletions
@@ -130,198 +130,5 @@ return {
         return get_models(self)
       end,
     },
-    temperature = {
-      order = 2,
-      mapping = "parameters.options",
-      type = "number",
-      optional = true,
-      default = 0.8,
-      desc = "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.",
-      validate = function(n)
-        return n >= 0 and n <= 2, "Must be between 0 and 2"
-      end,
-    },
-    num_ctx = {
-      order = 3,
-      mapping = "parameters.options",
-      type = "number",
-      optional = true,
-      default = 2048,
-      desc = "The maximum number of tokens that the language model can consider at once. This determines the size of the input context window, allowing the model to take into account longer text passages for generating responses. Adjusting this value can affect the model's performance and memory usage.",
-      validate = function(n)
-        return n > 0, "Must be a positive number"
-      end,
-    },
-    mirostat = {
-      order = 4,
-      mapping = "parameters.options",
-      type = "number",
-      optional = true,
-      default = 0,
-      desc = "Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)",
-      validate = function(n)
-        return n == 0 or n == 1 or n == 2, "Must be 0, 1, or 2"
-      end,
-    },
-    mirostat_eta = {
-      order = 5,
-      mapping = "parameters.options",
-      type = "number",
-      optional = true,
-      default = 0.1,
-      desc = "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)",
-      validate = function(n)
-        return n > 0, "Must be a positive number"
-      end,
-    },
-    mirostat_tau = {
-      order = 6,
-      mapping = "parameters.options",
-      type = "number",
-      optional = true,
-      default = 5.0,
-      desc = "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)",
-      validate = function(n)
-        return n > 0, "Must be a positive number"
-      end,
-    },
-    repeat_last_n = {
-      order = 7,
-      mapping = "parameters.options",
-      type = "number",
-      optional = true,
-      default = 64,
-      desc = "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
-      validate = function(n)
-        return n >= -1, "Must be -1 or greater"
-      end,
-    },
-    repeat_penalty = {
-      order = 8,
-      mapping = "parameters.options",
-      type = "number",
-      optional = true,
-      default = 1.1,
-      desc = "Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)",
-      validate = function(n)
-        return n >= 0, "Must be a non-negative number"
-      end,
-    },
-    seed = {
-      order = 9,
-      mapping = "parameters.options",
-      type = "number",
-      optional = true,
-      default = 0,
-      desc = "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0)",
-      validate = function(n)
-        return n >= 0, "Must be a non-negative number"
-      end,
-    },
-    stop = {
-      order = 10,
-      mapping = "parameters.options",
-      type = "string",
-      optional = true,
-      default = nil,
-      desc = "Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.",
-      validate = function(s)
-        return s:len() > 0, "Cannot be an empty string"
-      end,
-    },
-    tfs_z = {
-      order = 11,
-      mapping = "parameters.options",
-      type = "number",
-      optional = true,
-      default = 1.0,
-      desc = "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)",
-      validate = function(n)
-        return n >= 0, "Must be a non-negative number"
-      end,
-    },
-    num_predict = {
-      order = 12,
-      mapping = "parameters.options",
-      type = "number",
-      optional = true,
-      default = -1,
-      desc = "Maximum number of tokens to predict when generating text. (Default: -1, -1 = infinite generation, -2 = fill context)",
-      validate = function(n)
-        return n >= -2, "Must be -2 or greater"
-      end,
-    },
-    top_k = {
-      order = 13,
-      mapping = "parameters.options",
-      type = "number",
-      optional = true,
-      default = 40,
-      desc = "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)",
-      validate = function(n)
-        return n >= 0, "Must be a non-negative number"
-      end,
-    },
-    top_p = {
-      order = 14,
-      mapping = "parameters.options",
-      type = "number",
-      optional = true,
-      default = 0.9,
-      desc = "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)",
-      validate = function(n)
-        return n >= 0 and n <= 1, "Must be between 0 and 1"
-      end,
-    },
-    max_tokens = {
-      order = 5,
-      mapping = "parameters",
-      type = "integer",
-      optional = true,
-      default = nil,
-      desc = "The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.",
-      validate = function(n)
-        return n > 0, "Must be greater than 0"
-      end,
-    },
-    presence_penalty = {
-      order = 6,
-      mapping = "parameters",
-      type = "number",
-      optional = true,
-      default = 0,
-      desc = "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
-      validate = function(n)
-        return n >= -2 and n <= 2, "Must be between -2 and 2"
-      end,
-    },
-    frequency_penalty = {
-      order = 7,
-      mapping = "parameters",
-      type = "number",
-      optional = true,
-      default = 0,
-      desc = "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
-      validate = function(n)
-        return n >= -2 and n <= 2, "Must be between -2 and 2"
-      end,
-    },
-    logit_bias = {
-      order = 8,
-      mapping = "parameters",
-      type = "map",
-      optional = true,
-      default = nil,
-      desc = "Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID) to an associated bias value from -100 to 100. Use https://platform.openai.com/tokenizer to find token IDs.",
-      subtype_key = {
-        type = "integer",
-      },
-      subtype = {
-        type = "integer",
-        validate = function(n)
-          return n >= -100 and n <= 100, "Must be between -100 and 100"
-        end,
-      },
-    },
   },
 }
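
One detail worth noting in the removed schema is the `mapping` value. The old Ollama-flavoured options used `mapping = "parameters.options"`, while the parameters kept in the documentation example use `mapping = "parameters"`. Assuming `mapping` is a dot path into the request body that CodeCompanion builds (the contrast in this diff suggests as much, but the diff does not confirm it), the two settings put a value such as `temperature` in different places. A purely illustrative sketch of the resulting payload shapes:

```lua
-- Purely illustrative (not plugin code): where `temperature` would land in the
-- request body under each mapping, assuming `mapping` is a dot path into the body.

-- mapping = "parameters": flat, OpenAI-style chat completion parameters
local openai_style_body = {
  model = "deepseek-r1-671b",
  temperature = 0.8,
}

-- mapping = "parameters.options": nested, Ollama-style options table
local ollama_style_body = {
  model = "deepseek-r1-671b",
  options = { temperature = 0.8 },
}

print(openai_style_body.temperature)         --> 0.8
print(ollama_style_body.options.temperature) --> 0.8
```

A plain OpenAI chat completions endpoint expects the flat form; the nested `options` table is specific to Ollama-style servers, which is consistent with this commit no longer forcing those options on every endpoint.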

0 commit comments
