@@ -8,12 +8,12 @@
   "model": {
     "type": "string",
     "title": "Model",
-    "description": "The HuggingFace model to deploy (Hint: For a simple, lightweight demo try ise-uiuc/Magicoder-S-DS-6.7B)"
+    "description": "The [HuggingFace model](https://huggingface.co/models) to deploy (see [here](https://github.com/stackhpc/azimuth-llm?tab=readme-ov-file#tested-models) for a list of tested models)."
   },
   "token": {
     "type": "string",
     "title": "Access Token",
-    "description": "The HuggingFace access token to use for installing gated models.",
+    "description": "A HuggingFace [access token](https://huggingface.co/docs/hub/security-tokens). Only required for [gated models](https://huggingface.co/docs/hub/en/models-gated) (e.g. Llama 2).",
     "default": ""
   }
 },
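
For reference, a values snippet satisfying this part of the schema might look like the sketch below. The model value simply reuses the chart's new default model_name from the next hunk (any entry from the linked tested-models list should work), and the token value is a placeholder; per the schema it defaults to the empty string and is only needed for gated models.

{
  "huggingface": {
    "model": "mistralai/Mistral-7B-Instruct-v0.2",
    "token": "<your-huggingface-access-token>"
  }
}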

@@ -28,42 +28,50 @@
   "model_name": {
     "type": "string",
     "title": "Model Name",
-    "description": "Model name supplied to OpenAI client in frontend web app. Should match huggingface.model above."
+    "description": "Model name supplied to the OpenAI client in the frontend web app. Should match huggingface.model above.",
+    "default": "mistralai/Mistral-7B-Instruct-v0.2"
   },
   "model_instruction": {
     "type": "string",
     "title": "Instruction",
-    "description": "The initial model prompt (i.e. the hidden instructions) to use when generating responses."
+    "description": "The initial model prompt (i.e. the hidden instructions) to use when generating responses.",
+    "default": "You are a helpful AI assistant. Please respond appropriately."
   },
   "page_title": {
     "type": "string",
     "title": "Page Title",
-    "description": "The title to use for the chat interface."
+    "description": "The title to use for the chat interface.",
+    "default": "Large Language Model"
   },
   "llm_max_tokens": {
     "type": "number",
     "title": "Max Tokens",
-    "description": "The maximum number of new [tokens](https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens) to generate for each LLM responses."
+    "description": "The maximum number of new [tokens](https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens) to generate for each LLM response.",
+    "default": 1000
   },
   "llm_temperature": {
     "type": "number",
     "title": "LLM Temperature",
-    "description": "The '[temperature](https://platform.openai.com/docs/api-reference/chat/create#chat-create-temperature)' value to use when generating LLM responses."
+    "description": "The [temperature](https://platform.openai.com/docs/api-reference/chat/create#chat-create-temperature) value to use when generating LLM responses.",
+    "default": 1
   },
   "llm_top_p": {
     "type": "number",
     "title": "LLM Top P",
-    "description": "The [top p](https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p) value to use when generating LLM responses."
+    "description": "The [top p](https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p) value to use when generating LLM responses.",
+    "default": 1
   },
   "llm_presence_penalty": {
     "type": "number",
     "title": "LLM Presence Penalty",
-    "description": "The [presence penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-presence_penalty) to use when generating LLM responses."
+    "description": "The [presence penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-presence_penalty) to use when generating LLM responses.",
+    "default": 0
   },
   "llm_frequency_penalty": {
     "type": "number",
     "title": "LLM Frequency Penalty",
-    "description": "The [frequency_penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-frequency_penalty) to use when generating LLM responses."
+    "description": "The [frequency_penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-frequency_penalty) to use when generating LLM responses.",
+    "default": 0
   }

 },
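
Taken together, the new defaults in this hunk describe a UI settings object like the sketch below. This is illustrative only: the parent key that nests these fields in the chart's values file lies outside the lines shown in this diff, so only the fields themselves appear here.

{
  "model_name": "mistralai/Mistral-7B-Instruct-v0.2",
  "model_instruction": "You are a helpful AI assistant. Please respond appropriately.",
  "page_title": "Large Language Model",
  "llm_max_tokens": 1000,
  "llm_temperature": 1,
  "llm_top_p": 1,
  "llm_presence_penalty": 0,
  "llm_frequency_penalty": 0
}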