Commit 456b221

Fix scheme for passing custom LLM params
1 parent: 6020434

2 files changed: 98 additions & 88 deletions

charts/azimuth-chat/values.schema.json

Lines changed: 49 additions & 44 deletions
@@ -69,50 +69,55 @@
       "description": "The title to display at the top of the chat interface.",
       "default": "Large Language Model"
     },
-    "llm_max_tokens": {
-      "type": "integer",
-      "title": "Max Tokens",
-      "description": "The maximum number of new [tokens](https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens) to generate for each LLM responses.",
-      "default": 1000
-    },
-    "llm_temperature": {
-      "type": "number",
-      "title": "LLM Temperature",
-      "description": "The [temperature](https://platform.openai.com/docs/api-reference/chat/create#chat-create-temperature) value to use when generating LLM responses.",
-      "default": 0,
-      "minimum": 0,
-      "maximum": 2
-    },
-    "llm_top_p": {
-      "type": "number",
-      "title": "LLM Top P",
-      "description": "The [top p](https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p) value to use when generating LLM responses.",
-      "default": 1,
-      "exclusiveMinimum": 0,
-      "maximum": 1
-    },
-    "llm_top_k": {
-      "type": "integer",
-      "title": "LLM Top K",
-      "description": "The [top k](https://docs.vllm.ai/en/stable/dev/sampling_params.html) value to use when generating LLM responses (must be an integer).",
-      "default": -1,
-      "minimum": -1
-    },
-    "llm_presence_penalty": {
-      "type": "number",
-      "title": "LLM Presence Penalty",
-      "description": "The [presence penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-presence_penalty) to use when generating LLM responses.",
-      "default": 0,
-      "minimum": -2,
-      "maximum": 2
-    },
-    "llm_frequency_penalty": {
-      "type": "number",
-      "title": "LLM Frequency Penalty",
-      "description": "The [frequency_penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-frequency_penalty) to use when generating LLM responses.",
-      "default": 0,
-      "minimum": -2,
-      "maximum": 2
+    "llm_params": {
+      "type": "object",
+      "properties": {
+        "max_tokens": {
+          "type": "integer",
+          "title": "Max Tokens",
+          "description": "The maximum number of new [tokens](https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens) to generate for each LLM responses.",
+          "default": 1000
+        },
+        "temperature": {
+          "type": "number",
+          "title": "LLM Temperature",
+          "description": "The [temperature](https://platform.openai.com/docs/api-reference/chat/create#chat-create-temperature) value to use when generating LLM responses.",
+          "default": 0,
+          "minimum": 0,
+          "maximum": 2
+        },
+        "top_p": {
+          "type": "number",
+          "title": "LLM Top P",
+          "description": "The [top p](https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p) value to use when generating LLM responses.",
+          "default": 1,
+          "exclusiveMinimum": 0,
+          "maximum": 1
+        },
+        "top_k": {
+          "type": "integer",
+          "title": "LLM Top K",
+          "description": "The [top k](https://docs.vllm.ai/en/stable/dev/sampling_params.html) value to use when generating LLM responses (must be an integer).",
+          "default": -1,
+          "minimum": -1
+        },
+        "presence_penalty": {
+          "type": "number",
+          "title": "LLM Presence Penalty",
+          "description": "The [presence penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-presence_penalty) to use when generating LLM responses.",
+          "default": 0,
+          "minimum": -2,
+          "maximum": 2
+        },
+        "frequency_penalty": {
+          "type": "number",
+          "title": "LLM Frequency Penalty",
+          "description": "The [frequency_penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-frequency_penalty) to use when generating LLM responses.",
+          "default": 0,
+          "minimum": -2,
+          "maximum": 2
+        }
+      }
     }
   },
   "required": [

charts/azimuth-image-analysis/values.schema.json

Lines changed: 49 additions & 44 deletions
@@ -53,50 +53,55 @@
       "title": "Model Name",
       "description": "Model name supplied to the OpenAI client in frontend web app. Should match huggingface.model above."
     },
-    "llm_max_tokens": {
-      "type": "integer",
-      "title": "Max Tokens",
-      "description": "The maximum number of new [tokens](https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens) to generate for each LLM responses.",
-      "default": 1000
-    },
-    "llm_temperature": {
-      "type": "number",
-      "title": "LLM Temperature",
-      "description": "The [temperature](https://platform.openai.com/docs/api-reference/chat/create#chat-create-temperature) value to use when generating LLM responses.",
-      "default": 0,
-      "minimum": 0,
-      "maximum": 2
-    },
-    "llm_top_p": {
-      "type": "number",
-      "title": "LLM Top P",
-      "description": "The [top p](https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p) value to use when generating LLM responses.",
-      "default": 1,
-      "exclusiveMinimum": 0,
-      "maximum": 1
-    },
-    "llm_top_k": {
-      "type": "integer",
-      "title": "LLM Top K",
-      "description": "The [top k](https://docs.vllm.ai/en/stable/dev/sampling_params.html) value to use when generating LLM responses (must be an integer).",
-      "default": -1,
-      "minimum": -1
-    },
-    "llm_presence_penalty": {
-      "type": "number",
-      "title": "LLM Presence Penalty",
-      "description": "The [presence penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-presence_penalty) to use when generating LLM responses.",
-      "default": 0,
-      "minimum": -2,
-      "maximum": 2
-    },
-    "llm_frequency_penalty": {
-      "type": "number",
-      "title": "LLM Frequency Penalty",
-      "description": "The [frequency_penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-frequency_penalty) to use when generating LLM responses.",
-      "default": 0,
-      "minimum": -2,
-      "maximum": 2
+    "llm_params": {
+      "type": "object",
+      "properties": {
+        "max_tokens": {
+          "type": "integer",
+          "title": "Max Tokens",
+          "description": "The maximum number of new [tokens](https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens) to generate for each LLM responses.",
+          "default": 1000
+        },
+        "temperature": {
+          "type": "number",
+          "title": "LLM Temperature",
+          "description": "The [temperature](https://platform.openai.com/docs/api-reference/chat/create#chat-create-temperature) value to use when generating LLM responses.",
+          "default": 0,
+          "minimum": 0,
+          "maximum": 2
+        },
+        "top_p": {
+          "type": "number",
+          "title": "LLM Top P",
+          "description": "The [top p](https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p) value to use when generating LLM responses.",
+          "default": 1,
+          "exclusiveMinimum": 0,
+          "maximum": 1
+        },
+        "top_k": {
+          "type": "integer",
+          "title": "LLM Top K",
+          "description": "The [top k](https://docs.vllm.ai/en/stable/dev/sampling_params.html) value to use when generating LLM responses (must be an integer).",
+          "default": -1,
+          "minimum": -1
+        },
+        "presence_penalty": {
+          "type": "number",
+          "title": "LLM Presence Penalty",
+          "description": "The [presence penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-presence_penalty) to use when generating LLM responses.",
+          "default": 0,
+          "minimum": -2,
+          "maximum": 2
+        },
+        "frequency_penalty": {
+          "type": "number",
+          "title": "LLM Frequency Penalty",
+          "description": "The [frequency_penalty](https://platform.openai.com/docs/api-reference/chat/create#chat-create-frequency_penalty) to use when generating LLM responses.",
+          "default": 0,
+          "minimum": -2,
+          "maximum": 2
+        }
+      }
     }
   },
   "required": [
