Skip to content

Commit 3d12297

Browse files
committed
set claude-2 as default model
1 parent 704df23 commit 3d12297

File tree

2 files changed

+24
-126
lines changed

2 files changed

+24
-126
lines changed

package.json

Lines changed: 0 additions & 123 deletions
Original file line numberDiff line numberDiff line change
@@ -262,129 +262,6 @@
262262
"order": 3,
263263
"markdownDescription": "properties for claude-2 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
264264
},
265-
"devchat.customModel": {
266-
"type": "array",
267-
"items": {
268-
"type": "object",
269-
"properties": {
270-
"model": {
271-
"oneOf": [
272-
{
273-
"type": "string",
274-
"enum": [
275-
"openai/gpt-4",
276-
"openai/gpt-4-0613",
277-
"openai/gpt-4-0314",
278-
"openai/gpt-4-32k",
279-
"openai/gpt-4-32k-0314",
280-
"openai/gpt-4-32k-0613",
281-
"openai/gpt-3.5-turbo",
282-
"openai/gpt-3.5-turbo-0301",
283-
"openai/gpt-3.5-turbo-0613",
284-
"openai/gpt-3.5-turbo-16k",
285-
"openai/gpt-3.5-turbo-16k-0613",
286-
"openai/text-davinci-003",
287-
"openai/curie-001",
288-
"openai/babbage-001",
289-
"openai/ada-001",
290-
"openai/babbage-002",
291-
"openai/davinci-002",
292-
"cohere/command-nightly",
293-
"cohere/command",
294-
"cohere/command-light",
295-
"cohere/command-medium-beta",
296-
"cohere/command-xlarge-beta",
297-
"anthropic/claude-2",
298-
"anthropic/claude-instant-1",
299-
"anthropic/claude-instant-1.2",
300-
"replicate/replicate/",
301-
"replicate/replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
302-
"replicate/a16z-infra/llama-2-13b-chat:2a7f981751ec7fdf87b5b91ad4db53683a98082e9ff7bfd12c8cd5ea85980a52",
303-
"replicate/joehoover/instructblip-vicuna13b:c4c54e3c8c97cd50c2d2fec9be3b6065563ccf7d43787fb99f84151b867178fe",
304-
"replicate/replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5",
305-
"replicate/a16z-infra/llama-2-7b-chat:7b0bfc9aff140d5b75bacbed23e91fd3c34b01a1e958d32132de6e0a19796e2c",
306-
"replicate/replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b",
307-
"replicate/daanelson/flan-t5-large:ce962b3f6792a57074a601d3979db5839697add2e4e02696b3ced4c022d4767f",
308-
"replicate/replit/replit-code-v1-3b:b84f4c074b807211cd75e3e8b1589b6399052125b4c27106e43d47189e8415ad",
309-
"huggingface/meta-llama/Llama-2-7b-hf",
310-
"huggingface/meta-llama/Llama-2-7b-chat-hf",
311-
"huggingface/meta-llama/Llama-2-13b-hf",
312-
"huggingface/meta-llama/Llama-2-13b-chat-hf",
313-
"huggingface/meta-llama/Llama-2-70b-hf",
314-
"huggingface/meta-llama/Llama-2-70b-chat-hf",
315-
"huggingface/meta-llama/Llama-2-7b",
316-
"huggingface/meta-llama/Llama-2-7b-chat",
317-
"huggingface/meta-llama/Llama-2-13b",
318-
"huggingface/meta-llama/Llama-2-13b-chat",
319-
"huggingface/meta-llama/Llama-2-70b",
320-
"huggingface/meta-llama/Llama-2-70b-chat",
321-
"together_ai/togethercomputer/llama-2-70b-chat",
322-
"together_ai/togethercomputer/Llama-2-7B-32K-Instruct",
323-
"together_ai/togethercomputer/llama-2-7b",
324-
"baseten/qvv0xeq",
325-
"baseten/q841o8w",
326-
"baseten/31dxrj3",
327-
"openrouter/google/palm-2-codechat-bison",
328-
"openrouter/google/palm-2-chat-bison",
329-
"openrouter/openai/gpt-3.5-turbo",
330-
"openrouter/openai/gpt-3.5-turbo-16k",
331-
"openrouter/openai/gpt-4-32k",
332-
"openrouter/anthropic/claude-2",
333-
"openrouter/anthropic/claude-instant-v1",
334-
"openrouter/meta-llama/llama-2-13b-chat",
335-
"openrouter/meta-llama/llama-2-70b-chat",
336-
"vertex_ai/chat-bison",
337-
"vertex_ai/chat-bison@001",
338-
"vertex_ai/text-bison",
339-
"vertex_ai/text-bison@001",
340-
"ai21/j2-ultra",
341-
"ai21/j2-mid",
342-
"ai21/j2-light"
343-
],
344-
"description": "Specify llm model name."
345-
},
346-
{
347-
"type": "string",
348-
"description": "[required*] Specify llm model name."
349-
}
350-
]
351-
},
352-
"api_key": {
353-
"type": "string",
354-
"default": "",
355-
"description": "[required*] Specify access key for selected provider."
356-
},
357-
"api_base": {
358-
"type": "string",
359-
"default": "",
360-
"description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
361-
},
362-
"temperature": {
363-
"type": "number",
364-
"default": 0.3,
365-
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
366-
},
367-
"max_tokens": {
368-
"type": "number",
369-
"default": 1000,
370-
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
371-
},
372-
"presence_penalty": {
373-
"type": "number",
374-
"default": 0,
375-
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
376-
},
377-
"frequency_penalty": {
378-
"type": "number",
379-
"default": 0,
380-
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
381-
}
382-
},
383-
"additionalProperties": false
384-
},
385-
"order": 6,
386-
"markdownDescription": "Specify the custom llm model for DevChat."
387-
},
388265
"devchat.defaultModel": {
389266
"oneOf": [
390267
{

src/extension.ts

Lines changed: 24 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -35,9 +35,7 @@ import { FT } from './util/feature_flags/feature_toggles';
3535

3636
async function configUpdateTo_0912() {
3737
const defaultModel: any = UiUtilWrapper.getConfiguration("devchat", "defaultModel");
38-
if (!defaultModel) {
39-
vscode.workspace.getConfiguration("devchat").update("defaultModel", "gpt-3.5-turbo", vscode.ConfigurationTarget.Global);
40-
}
38+
4139

4240
let devchatKey = UiUtilWrapper.getConfiguration('DevChat', 'Access_Key_DevChat');
4341
let openaiKey = UiUtilWrapper.getConfiguration('DevChat', 'Api_Key_OpenAI');
@@ -79,6 +77,10 @@ async function configUpdateTo_0912() {
7977
modelConfigNew["provider"] = "openai";
8078
}
8179

80+
if (!defaultModel) {
81+
vscode.workspace.getConfiguration("devchat").update("defaultModel", "gpt-3.5-turbo", vscode.ConfigurationTarget.Global);
82+
}
83+
8284
try {
8385
vscode.workspace.getConfiguration("devchat").update("Model.gpt-3-5", modelConfigNew, vscode.ConfigurationTarget.Global);
8486
vscode.workspace.getConfiguration("devchat").update("Model.gpt-3-5-16k", modelConfigNew, vscode.ConfigurationTarget.Global);
@@ -87,6 +89,25 @@ async function configUpdateTo_0912() {
8789
return;
8890
}
8991
}
92+
93+
const modelConfig4: any = UiUtilWrapper.getConfiguration("devchat", "Model.claude-2");
94+
if (Object.keys(modelConfig4).length === 0) {
95+
modelConfigNew = {};
96+
if (devchatKey) {
97+
modelConfigNew["api_key"] = devchatKey;
98+
} else if (openaiKey) {
99+
modelConfigNew["api_key"] = openaiKey;
100+
}
101+
102+
if (modelConfigNew["api_key"].startsWith("DC.")) {
103+
if (!defaultModel) {
104+
vscode.workspace.getConfiguration("devchat").update("defaultModel", "claude-2", vscode.ConfigurationTarget.Global);
105+
}
106+
107+
modelConfigNew["provider"] = "anthropic";
108+
vscode.workspace.getConfiguration("devchat").update("Model.claude-2", modelConfigNew, vscode.ConfigurationTarget.Global);
109+
}
110+
}
90111
}
91112

92113

0 commit comments

Comments (0)