```diff
       "description": "The title to display at the top of the chat interface.",
       "default": "Large Language Model"
     },
-    "llm_max_tokens": {
-      "type": "integer",
-      "title": "Max Tokens",
-      "description": "The maximum number of new [tokens](https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens) to generate for each LLM responses.",
-      "default": 1000
-    },
-    "llm_temperature": {
-      "type": "number",
-      "title": "LLM Temperature",
-      "description": "The [temperature](https://platform.openai.com/docs/api-reference/chat/create#chat-create-temperature) value to use when generating LLM responses.",
-      "default": 0,
-      "minimum": 0,
-      "maximum": 2
-    },
-    "llm_top_p": {
-      "type": "number",
-      "title": "LLM Top P",
-      "description": "The [top p](https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p) value to use when generating LLM responses.",
-      "default": 1,
-      "exclusiveMinimum": 0,
-      "maximum": 1
-    },
-    "llm_top_k": {
-      "type": "integer",
-      "title": "LLM Top K",
-      "description": "The [top k](https://docs.vllm.ai/en/stable/dev/sampling_params.html) value to use when generating LLM responses (must be an integer).",
-      "default": -1,
-      "minimum": -1
-    },
-    "llm_presence_penalty": {
-      "type": "number",
-      "title": "LLM Presence Penalty",
-      "description": "The [presence penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-presence_penalty) to use when generating LLM responses.",
-      "default": 0,
-      "minimum": -2,
-      "maximum": 2
-    },
-    "llm_frequency_penalty": {
-      "type": "number",
-      "title": "LLM Frequency Penalty",
-      "description": "The [frequency_penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-frequency_penalty) to use when generating LLM responses.",
-      "default": 0,
-      "minimum": -2,
-      "maximum": 2
+    "llm_params": {
+      "type": "object",
+      "properties": {
+        "max_tokens": {
+          "type": "integer",
+          "title": "Max Tokens",
+          "description": "The maximum number of new [tokens](https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens) to generate for each LLM response.",
+          "default": 1000
+        },
+        "temperature": {
+          "type": "number",
+          "title": "LLM Temperature",
+          "description": "The [temperature](https://platform.openai.com/docs/api-reference/chat/create#chat-create-temperature) value to use when generating LLM responses.",
+          "default": 0,
+          "minimum": 0,
+          "maximum": 2
+        },
+        "top_p": {
+          "type": "number",
+          "title": "LLM Top P",
+          "description": "The [top p](https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p) value to use when generating LLM responses.",
+          "default": 1,
+          "exclusiveMinimum": 0,
+          "maximum": 1
+        },
+        "top_k": {
+          "type": "integer",
+          "title": "LLM Top K",
+          "description": "The [top k](https://docs.vllm.ai/en/stable/dev/sampling_params.html) value to use when generating LLM responses (must be an integer).",
+          "default": -1,
+          "minimum": -1
+        },
+        "presence_penalty": {
+          "type": "number",
+          "title": "LLM Presence Penalty",
+          "description": "The [presence penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-presence_penalty) to use when generating LLM responses.",
+          "default": 0,
+          "minimum": -2,
+          "maximum": 2
+        },
+        "frequency_penalty": {
+          "type": "number",
+          "title": "LLM Frequency Penalty",
+          "description": "The [frequency penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-frequency_penalty) to use when generating LLM responses.",
+          "default": 0,
+          "minimum": -2,
+          "maximum": 2
+        }
+      }
     }
   },
   "required": [
```
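
For illustration, a settings document that validates against the restructured schema would nest all sampling options under a single `llm_params` object instead of separate top-level `llm_*` keys. This hypothetical fragment (not taken from the repository) uses the defaults declared above:

```json
{
  "llm_params": {
    "max_tokens": 1000,
    "temperature": 0,
    "top_p": 1,
    "top_k": -1,
    "presence_penalty": 0,
    "frequency_penalty": 0
  }
}
```

Grouping the sampling options this way presumably lets the whole `llm_params` block be passed through to the completion call as one unit, rather than collecting each `llm_*` key individually.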