-        unknown_keys = []  # keys that are unknown to litellm
+        # unknown_keys = []  # keys that are unknown to litellm
         unsupported_openai_params = []  # openai params that are known to litellm but not supported for this model/provider
         for key in backend_specific.keys():
-            if key not in standard_openai_subset.keys():
-                unknown_keys.append(key)
-
-            elif key not in supported_params:
+            if key not in supported_params:
                 unsupported_openai_params.append(key)

-        if len(unknown_keys) > 0:
-            FancyLogger.get_logger().warning(
-                f"litellm allows for unknown / non-openai input params; mellea won't validate the following params that may cause issues: {', '.join(unknown_keys)}"
-            )
+        # if len(unknown_keys) > 0:
+        #     FancyLogger.get_logger().warning(
+        #         f"litellm allows for unknown / non-openai input params; mellea won't validate the following params that may cause issues: {', '.join(unknown_keys)}"
+        #     )

         if len(unsupported_openai_params) > 0:
             FancyLogger.get_logger().warning(
                 f"litellm will automatically drop the following openai keys that aren't supported by the current model/provider: {', '.join(unsupported_openai_params)}"
-def _extract_tools(action, format, model_opts, tool_calls):
+def _extract_tools(action, format, model_opts, tool_calls) -> dict[str, Callable]:
     tools: dict[str, Callable] = dict()
     if tool_calls:
         if format:
             FancyLogger.get_logger().warning(
                 f"Tool calling typically uses constrained generation, but you have specified a `format` in your generate call. NB: tool calling is superseded by format; we will NOT call tools for your request: {action}"
...
-            # invariant re: relationship between the model_options set of tools and the TemplateRepresentation set of tools
-            assert fn_name not in tools.keys(), (
-                f"Cannot add tool {fn_name} because that tool was already defined in the TemplateRepresentation for the action."
-            )
-            # type checking because ModelOptions is an untyped dict and the calling convention for tools isn't clearly documented at our abstraction boundaries.
-            assert type(fn_name) is str, (
-                "When providing a `ModelOption.TOOLS` parameter to `model_options`, always used the type Dict[str, Callable] where `str` is the function name and the callable is the function."
-            )
-            assert callable(model_options_tools[fn_name]), (
-                "When providing a `ModelOption.TOOLS` parameter to `model_options`, always used the type Dict[str, Callable] where `str` is the function name and the callable is the function."
-            )
-            # Add the model_options tool to the existing set of tools.
-            tools[fn_name] = model_options_tools[fn_name]
+        add_tools_from_context_actions(tools, [action])
+        add_tools_from_model_options(tools, model_opts)
     return tools


 def _generate_from_raw(
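The deleted block inlined the merging of `ModelOption.TOOLS` into `tools`, guarded by asserts; the new code delegates to the shared `add_tools_from_context_actions` and `add_tools_from_model_options` helpers instead. Their real implementations are not part of this hunk. The sketch below is only a hypothetical reconstruction of the contract `add_tools_from_model_options` must enforce, pieced together from the removed assertions; the "tools" key lookup is a placeholder for however mellea reads `ModelOption.TOOLS`.

    from typing import Any, Callable

    def add_tools_from_model_options(tools: dict[str, Callable], model_opts: dict[str, Any]) -> None:
        """Hypothetical sketch, not mellea's actual helper."""
        model_options_tools = model_opts.get("tools")  # placeholder for ModelOption.TOOLS
        if not model_options_tools:
            return
        for fn_name, fn in model_options_tools.items():
            # Same contract the removed asserts enforced: a Dict[str, Callable]
            # whose names don't collide with tools already added for the action.
            if not isinstance(fn_name, str) or not callable(fn):
                raise TypeError(
                    "ModelOption.TOOLS must be a Dict[str, Callable] mapping function names to functions."
                )
            if fn_name in tools:
                raise ValueError(
                    f"Cannot add tool {fn_name}: already defined in the TemplateRepresentation for the action."
                )
            tools[fn_name] = fn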
@@ -333,68 +316,6 @@ def _generate_from_raw(
 ) -> list[ModelOutputThunk]:
     """Generate using the completions api. Gives the input provided to the model without templating."""
     raise NotImplementedError("This method is not implemented yet.")
-    # extra_body = {}
-    # if format is not None:
-    #     FancyLogger.get_logger().warning(
-    #         "The official OpenAI completion api does not accept response format / structured decoding; "
-    #         "it will be passed as an extra arg."
-    #     )
-    #
-    # # Some versions (like vllm's version) of the OpenAI API support structured decoding for completions requests.