Skip to content

Commit 9d25e8b

Browse files
committed
Merge branch 'main' into dev
2 parents 91721a0 + b68e136 commit 9d25e8b

File tree

3 files changed: +81 −20 lines changed

api-keys.env.template

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,19 @@
 # OpenAI Configuration
 OPENAI_ENABLED=true
 OPENAI_API_KEY=#your-openai-api-key
-OPENAI_MODELS=gpt-4o,gpt-4o-mini # comma separated list of models
+OPENAI_MODELS=gpt-4.5-preview,o3-mini,gpt-4o-mini,o1,o1-mini # comma separated list of models

 # Azure OpenAI Configuration
 AZURE_ENABLED=true
 AZURE_API_KEY=#your-azure-openai-api-key
 AZURE_API_BASE=https://your-azure-openai-endpoint.openai.azure.com/
 AZURE_API_VERSION=2024-02-15-preview
-AZURE_MODELS=gpt-4o
+AZURE_MODELS=o3-mini,gpt-4o

 # Anthropic Configuration
 ANTHROPIC_ENABLED=true
 ANTHROPIC_API_KEY=#your-anthropic-api-key
-ANTHROPIC_MODELS=claude-3-5-sonnet-20241022,claude-3-5-haiku-20241022
+ANTHROPIC_MODELS=claude-3-7-sonnet-latest,claude-3-5-haiku-latest

 # Ollama Configuration
 OLLAMA_ENABLED=true

py-src/data_formulator/agents/client_utils.py

Lines changed: 13 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,11 @@ def __init__(self, endpoint, model, api_key=None, api_base=None, api_version=No
         # other params, including temperature, max_completion_tokens, api_base, api_version
         self.params = {
             "temperature": 0.7,
-            "max_completion_tokens": 1200,
         }

+        if not (model == "o3-mini" or model == "o1"):
+            self.params["max_completion_tokens"] = 1200
+
         if api_key is not None and api_key != "":
             self.params["api_key"] = api_key
         if api_base is not None and api_base != "":
@@ -67,12 +69,16 @@ def get_completion(self, messages):
                 timeout=120
             )

-            return client.chat.completions.create(
-                model=self.model,
-                messages=messages,
-                temperature=self.params["temperature"],
-                max_tokens=self.params["max_completion_tokens"],
-            )
+            completion_params = {
+                "model": self.model,
+                "messages": messages,
+            }
+
+            if not (self.model == "o3-mini" or self.model == "o1"):
+                completion_params["temperature"] = self.params["temperature"]
+                completion_params["max_tokens"] = self.params["max_completion_tokens"]
+
+            return client.chat.completions.create(**completion_params)
         else:
             return litellm.completion(
                 model=self.model,

src/views/ModelSelectionDialog.tsx

Lines changed: 65 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -81,6 +81,14 @@ export const ModelSelectionButton: React.FC<{}> = ({ }) => {
     const [modelDialogOpen, setModelDialogOpen] = useState<boolean>(false);
     const [showKeys, setShowKeys] = useState<boolean>(false);
     const [tempSelectedModelId, setTempSelectedModeId] = useState<string | undefined >(selectedModelId);
+    const [providerModelOptions, setProviderModelOptions] = useState<{[key: string]: string[]}>({
+        'openai': [],
+        'azure': [],
+        'anthropic': [],
+        'gemini': [],
+        'ollama': []
+    });
+    const [isLoadingModelOptions, setIsLoadingModelOptions] = useState<boolean>(false);
     const [appConfig, setAppConfig] = useState<AppConfig>({ SHOW_KEYS_ENABLED: true });

     // Fetch app configuration
@@ -108,23 +116,60 @@ export const ModelSelectionButton: React.FC<{}> = ({ }) => {
     const [newApiBase, setNewApiBase] = useState<string | undefined>(undefined);
     const [newApiVersion, setNewApiVersion] = useState<string | undefined>(undefined);

+    // Fetch available models from the API
+    useEffect(() => {
+        const fetchModelOptions = async () => {
+            setIsLoadingModelOptions(true);
+            try {
+                const response = await fetch(getUrls().CHECK_AVAILABLE_MODELS);
+                const data = await response.json();
+
+                // Group models by provider
+                const modelsByProvider: {[key: string]: string[]} = {
+                    'openai': [],
+                    'azure': [],
+                    'anthropic': [],
+                    'gemini': [],
+                    'ollama': []
+                };
+
+                data.forEach((modelConfig: any) => {
+                    const provider = modelConfig.endpoint;
+                    const model = modelConfig.model;
+
+                    if (provider && model && !modelsByProvider[provider].includes(model)) {
+                        modelsByProvider[provider].push(model);
+                    }
+                });
+
+                setProviderModelOptions(modelsByProvider);
+            } catch (error) {
+                console.error("Failed to fetch model options:", error);
+            } finally {
+                setIsLoadingModelOptions(false);
+            }
+        };
+
+        fetchModelOptions();
+    }, []);
+
     useEffect(() => {
         if (newEndpoint == 'ollama') {
             if (!newApiBase) {
                 setNewApiBase('http://localhost:11434');
             }
         }
         if (newEndpoint == "openai") {
-            if (!newModel) {
-                setNewModel('gpt-4o');
+            if (!newModel && providerModelOptions.openai.length > 0) {
+                setNewModel(providerModelOptions.openai[0]);
             }
         }
         if (newEndpoint == "anthropic") {
-            if (!newModel) {
-                setNewModel('claude-3-5-sonnet-20241022');
+            if (!newModel && providerModelOptions.anthropic.length > 0) {
+                setNewModel(providerModelOptions.anthropic[0]);
             }
         }
-    }, [newEndpoint]);
+    }, [newEndpoint, providerModelOptions]);

     let modelExists = models.some(m =>
         m.endpoint == newEndpoint && m.model == newModel && m.api_base == newApiBase
@@ -168,8 +213,8 @@ export const ModelSelectionButton: React.FC<{}> = ({ }) => {
                         value={newEndpoint}
                         onChange={(event: any, newValue: string | null) => {
                             setNewEndpoint(newValue || "");
-                            if (newModel == "" && newValue == "openai") {
-                                setNewModel("gpt-4o");
+                            if (newModel == "" && newValue == "openai" && providerModelOptions.openai.length > 0) {
+                                setNewModel(providerModelOptions.openai[0]);
                             }
                             if (!newApiVersion && newValue == "azure") {
                                 setNewApiVersion("2024-02-15");
@@ -220,7 +265,8 @@ export const ModelSelectionButton: React.FC<{}> = ({ }) => {
                         freeSolo
                         onChange={(event: any, newValue: string | null) => { setNewModel(newValue || ""); }}
                         value={newModel}
-                        options={['gpt-4o-mini', 'gpt-4o', 'claude-3-5-sonnet-20241022']}
+                        options={newEndpoint && providerModelOptions[newEndpoint] ? providerModelOptions[newEndpoint] : []}
+                        loading={isLoadingModelOptions}
                         renderOption={(props, option) => {
                             return <Typography {...props} onClick={()=>{ setNewModel(option); }} sx={{fontSize: "small"}}>{option}</Typography>
                         }}
@@ -229,7 +275,16 @@ export const ModelSelectionButton: React.FC<{}> = ({ }) => {
                                 error={newEndpoint != "" && !newModel}
                                 {...params}
                                 placeholder="model name"
-                                InputProps={{ ...params.InputProps, style: { fontSize: "0.875rem" } }}
+                                InputProps={{
+                                    ...params.InputProps,
+                                    style: { fontSize: "0.875rem" },
+                                    endAdornment: (
+                                        <>
+                                            {isLoadingModelOptions ? <CircularProgress color="inherit" size={20} /> : null}
+                                            {params.InputProps.endAdornment}
+                                        </>
+                                    ),
+                                }}
                                 inputProps={{
                                     ...params.inputProps,
                                     'aria-label': 'Select or enter a model',
@@ -244,7 +299,7 @@ export const ModelSelectionButton: React.FC<{}> = ({ }) => {
                         PaperComponent={({ children }) => (
                             <Paper>
                                 <Typography sx={{ p: 1, color: 'gray', fontStyle: 'italic', fontSize: 'small' }}>
-                                    examples
+                                    {isLoadingModelOptions ? 'Loading models...' : 'examples'}
                                 </Typography>
                                 {children}
                             </Paper>

0 commit comments

Comments
 (0)