You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
DashScope (Bailian, Alibaba Cloud's large model service platform) is a comprehensive one-stop platform for large model development and application building. It offers official API access to Tongyi Qianwen (Qwen) for commercial use while also supporting mainstream third-party large models across text, image, audio-video, and other modalities, along with industry-specific customized models.
Additionally, it provides capabilities such as automated prompt optimization, knowledge base management, function calling, workflow orchestration, and model customization, enabling users to quickly develop production-ready large model applications.
Configuration of the adapter:
-- CodeCompanion HTTP adapter for Alibaba Cloud DashScope (Bailian), using the
-- OpenAI-compatible endpoint. Most handlers delegate to the built-in OpenAI
-- adapter; the custom handlers below deal with DashScope-specific behavior:
-- per-model tool support, `reasoning_content` in streamed deltas, and messages
-- whose `content` arrives as a list of parts.
require("codecompanion").setup({
  adapters = {
    http = {
      dashscope = function()
        local log = require("codecompanion.utils.log")
        local openai = require("codecompanion.adapters.http.openai")
        local utils = require("codecompanion.utils.adapters")

        ---@class CodeCompanion.HTTPAdapter.DashScope: CodeCompanion.HTTPAdapter
        return {
          name = "dashscope",
          formatted_name = "DashScope",
          roles = {
            llm = "assistant",
            user = "user",
          },
          opts = {
            stream = true,
            tools = true,
            vision = false,
          },
          features = {
            text = true,
            tokens = true,
          },
          url = "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions",
          env = {
            -- Name of the environment variable holding the API key; it is
            -- interpolated into the Authorization header below.
            api_key = "DASHSCOPE_API_KEY",
          },
          headers = {
            ["Content-Type"] = "application/json",
            Authorization = "Bearer ${api_key}",
          },
          handlers = {
            -- Use the OpenAI adapter for the bulk of the work
            setup = function(self)
              return openai.handlers.setup(self)
            end,
            tokens = function(self, data)
              return openai.handlers.tokens(self, data)
            end,
            form_parameters = function(self, params, messages)
              return openai.handlers.form_parameters(self, params, messages)
            end,
            ---Drop the tools payload for models that cannot use tools
            ---(per the `can_use_tools` flag in the schema choices below),
            ---warning the user when tools were requested anyway.
            ---@param self CodeCompanion.HTTPAdapter
            ---@param tools table|nil
            ---@return table|nil
            form_tools = function(self, tools)
              -- NOTE(review): this reads the schema *default* model, not the
              -- model currently selected in the chat buffer — confirm this is
              -- intended before relying on per-model tool gating.
              local model = self.schema.model.default
              local model_opts = self.schema.model.choices[model]
              if model_opts.opts and model_opts.opts.can_use_tools == false then
                if tools and vim.tbl_count(tools) > 0 then
                  log:warn("Tools are not supported for this model")
                end
                return
              end
              return openai.handlers.form_tools(self, tools)
            end,
            ---Set the format of the role and content for the messages from the chat buffer
            ---@param self CodeCompanion.HTTPAdapter
            ---@param messages table Format is: { { role = "user", content = "Your prompt here" } }
            ---@return table
            form_messages = function(self, messages)
              messages = utils.merge_messages(messages)
              messages = utils.merge_system_messages(messages)

              -- Ensure that all messages have a content field: flatten
              -- table-valued content into a newline-joined string and replace
              -- missing content with an empty string.
              messages = vim
                .iter(messages)
                :map(function(msg)
                  local content = msg.content
                  if content and type(content) == "table" then
                    msg.content = table.concat(content, "\n")
                  elseif not content then
                    msg.content = ""
                  end
                  return msg
                end)
                :totable()

              return { messages = messages }
            end,
            ---Output the data from the API ready for insertion into the chat buffer
            ---@param self CodeCompanion.HTTPAdapter
            ---@param data table The streamed JSON data from the API, also formatted by the format_data handler
            ---@param tools? table The table to write any tool output to
            ---@return { status: string, output: { role: string, content: string, reasoning: string? } } | nil
            chat_output = function(self, data, tools)
              local output = {}
              if data and data ~= "" then
                local data_mod = utils.clean_streamed_data(data)
                local ok, json = pcall(vim.json.decode, data_mod, { luanil = { object = true } })
                if ok and json.choices and #json.choices > 0 then
                  local choice = json.choices[1]
                  -- Streaming responses carry a `delta`; non-streaming carry
                  -- the full `message`.
                  local delta = (self.opts and self.opts.stream) and choice.delta or choice.message
                  if delta then
                    output.role = delta.role
                    output.content = delta.content
                    -- DashScope surfaces model reasoning in a separate
                    -- `reasoning_content` field.
                    if delta.reasoning_content then
                      output.reasoning = output.reasoning or {}
                      output.reasoning.content = delta.reasoning_content
                    end
                    -- Process tools
                    if self.opts.tools and delta.tool_calls and tools then
                      for _, tool in ipairs(delta.tool_calls) do
                        if self.opts.stream then
                          -- Streamed tool calls arrive in fragments keyed by
                          -- `index`; append argument chunks to the matching
                          -- entry, or start a new one.
                          local index = tool.index
                          local found = false
                          for i, existing_tool in ipairs(tools) do
                            if existing_tool._index == index then
                              tools[i]["function"].arguments = (tools[i]["function"].arguments or "")
                                .. (tool["function"]["arguments"] or "")
                              found = true
                              break
                            end
                          end
                          if not found then
                            table.insert(tools, {
                              ["function"] = {
                                name = tool["function"]["name"],
                                arguments = tool["function"]["arguments"] or "",
                              },
                              id = tool.id,
                              type = "function",
                              _index = index,
                            })
                          end
                        else
                          -- Non-streaming: the tool call arrives complete.
                          table.insert(tools, {
                            _index = tool.index,
                            ["function"] = {
                              name = tool["function"]["name"],
                              arguments = tool["function"]["arguments"],
                            },
                            id = tool.id,
                            type = "function",
                          })
                        end
                      end
                    end
                    return {
                      status = "success",
                      output = output,
                    }
                  end
                end
              end
            end,
            inline_output = function(self, data, context)
              return openai.handlers.inline_output(self, data, context)
            end,
            tools = {
              format_tool_calls = function(self, tools)
                return openai.handlers.tools.format_tool_calls(self, tools)
              end,
              output_response = function(self, tool_call, output)
                return openai.handlers.tools.output_response(self, tool_call, output)
              end,
            },
            on_exit = function(self, data)
              return openai.handlers.on_exit(self, data)
            end,
          },
          schema = {
            ---@type CodeCompanion.Schema
            model = {
              order = 1,
              mapping = "parameters",
              type = "enum",
              desc = "ID of the model to use.",
              ---@type string|fun(): string
              default = "qwen-plus",
              choices = {
                ["qwen-plus"] = { formatted_name = "DashScope", opts = { can_reason = true, can_use_tools = false } },
                ["qwen-flash"] = { formatted_name = "DashScope", opts = { can_reason = true, can_use_tools = true } },
                ["qwen3-max"] = { formatted_name = "DashScope", opts = { can_use_tools = true } },
                ["qwen3-coder-plus"] = { formatted_name = "DashScope", opts = { can_use_tools = true } },
              },
            },
            ---@type CodeCompanion.Schema
            temperature = {
              order = 2,
              mapping = "parameters",
              type = "number",
              optional = true,
              default = 0.7,
              desc = "Sampling temperature, controlling the diversity of the model's generated text. A higher temperature increases the diversity of the output, while a lower temperature makes it more deterministic. Range: [0, 2). Since both temperature and top_p can control the diversity of generated text, it is recommended to set only one of them.",
              validate = function(n)
                return n >= 0 and n < 2, "Must be between 0 and 2"
              end,
            },
            ---@type CodeCompanion.Schema
            top_p = {
              order = 3,
              mapping = "parameters",
              type = "number",
              optional = true,
              default = 0.95,
              desc = "The probability threshold for nucleus sampling, controlling the diversity of text generated by the model. Higher top_p values result in more diverse outputs, while lower values produce more deterministic text. Range: (0 and 1]. 0.01 is recommended for qwen3-coder model",
              validate = function(n)
                return n > 0 and n <= 1, "Must be between 0 and 1"
              end,
            },
            ---@type CodeCompanion.Schema
            top_k = {
              -- NOTE(review): shares order 3 with top_p in the original —
              -- confirm whether that ordering collision is intentional.
              order = 3,
              mapping = "parameters",
              type = "number",
              optional = true,
              default = 20,
              desc = "The size of the candidate set for sampling during generation. For example, when set to 50, only the top 50 highest-scoring tokens from a single generation are included in the random sampling candidate set. A higher value increases the randomness of the output, while a lower value enhances determinism. When set to None or when top_k exceeds 100, the top_k strategy is disabled, and only the top_p strategy takes effect.",
              validate = function(n)
                return n >= 0, "Must be greater than or equal to 0"
              end,
            },
            ---@type CodeCompanion.Schema
            presence_penalty = {
              order = 4,
              mapping = "parameters",
              type = "number",
              optional = true,
              default = 0,
              desc = "Controlling the repetition of content in model-generated text. Range: [-2.0, 2.0]. Positive values reduce repetition, while negative values increase it.",
              validate = function(n)
                return n >= -2 and n <= 2, "Must be between -2 and 2"
              end,
            },
            ---@type CodeCompanion.Schema
            max_tokens = {
              order = 5,
              mapping = "parameters",
              type = "integer",
              optional = true,
              default = 16384,
              desc = "The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.",
              validate = function(n)
                return n > 0, "Must be greater than 0"
              end,
            },
            ---@type CodeCompanion.Schema
            enable_thinking = {
              order = 6,
              mapping = "parameters",
              type = "boolean",
              optional = true,
              default = true,
              desc = "Whether to activate the thinking mode.",
              subtype_key = {
                type = "integer",
              },
            },
            ---@type CodeCompanion.Schema
            thinking_budget = {
              order = 7,
              mapping = "parameters",
              type = "integer",
              optional = true,
              default = nil,
              desc = "The maximum length of the thinking process takes effect only when enable_thinking is true.",
              validate = function(n)
                return n > 0, "Must be greater than 0"
              end,
            },
            ---@type CodeCompanion.Schema
            seed = {
              order = 8,
              mapping = "parameters",
              type = "integer",
              optional = true,
              default = nil,
              desc = "Setting the seed parameter makes the text generation process more deterministic, typically used to ensure consistent results across model runs. By passing the same seed value in each model call while keeping other parameters unchanged, the model will attempt to return identical results. Range: [0 to 2147483647].",
              validate = function(n)
                return n >= 0 and n <= 2147483647, "Must be between 0 and 2147483647"
              end,
            },
            ---@type CodeCompanion.Schema
            logprobs = {
              order = 9,
              mapping = "parameters",
              type = "boolean",
              optional = true,
              default = nil,
              desc = "Whether to return log probabilities of the output tokens or not.",
              subtype_key = {
                type = "integer",
              },
            },
            ---@type CodeCompanion.Schema
            top_logprobs = {
              order = 10,
              mapping = "parameters",
              type = "integer",
              optional = true,
              default = nil,
              desc = "Number of top candidate tokens to return per step. Range: [0, 5]. It takes effect only when logprobs is true.",
              validate = function(n)
                return n >= 0 and n <= 5, "Must be between 0 and 5"
              end,
            },
            ---@type CodeCompanion.Schema
            stop = {
              order = 11,
              mapping = "parameters",
              type = "list",
              optional = true,
              default = nil,
              subtype = {
                type = "string",
              },
              desc = "Generation will automatically stop when the text generated by the model is about to contain the specified string or token_id.",
              validate = function(l)
                return #l >= 1 and #l <= 256, "Must have between 1 and 256 elements"
              end,
            },
            ---@type CodeCompanion.Schema
            enable_search = {
              order = 12,
              mapping = "parameters",
              type = "boolean",
              optional = true,
              default = true,
              desc = "Whether the model uses Internet search results as a reference when generating text.",
              subtype_key = {
                type = "integer",
              },
            },
            ---@type CodeCompanion.Schema
            search_options = {
              order = 13,
              mapping = "parameters",
              type = "object",
              optional = true,
              default = {
                forced_search = true,
                search_strategy = "turbo", -- turbo / max
                enable_search_extension = false,
              },
              desc = "The strategy of online search. It takes effect only when enable_search is true.",
            },
            ---@type CodeCompanion.Schema
            user = {
              order = 14,
              mapping = "parameters",
              type = "string",
              optional = true,
              default = nil,
              desc = "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.",
              validate = function(u)
                return u:len() < 100, "Cannot be longer than 100 characters"
              end,
            },
          },
        }
      end,
    },
  },
})
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
Uh oh!
There was an error while loading. Please reload this page.
-
DashScope (Bailian, Alibaba Cloud's large model service platform) is a comprehensive one-stop platform for large model development and application building. It offers official API access to Tongyi Qianwen (Qwen) for commercial use while also supporting mainstream third-party large models across text, image, audio-video, and other modalities, along with industry-specific customized models.
Additionally, it provides capabilities such as automated prompt optimization, knowledge base management, function calling, workflow orchestration, and model customization, enabling users to quickly develop production-ready large model applications.
Configuration of the adapter:
Beta Was this translation helpful? Give feedback.
All reactions