1 parent 59956e0 commit 0ce93e9
interpreter/terminal_interface/profiles/defaults/local.py
@@ -383,7 +383,7 @@ def list_ollama_models():
 print("Model process terminated.")

 # Set flags for Llamafile to work with interpreter
-interpreter.llm.model = "local"
+interpreter.llm.model = "openai/local"
 interpreter.llm.temperature = 0
 interpreter.llm.api_base = "http://localhost:8080/v1"
 interpreter.llm.supports_functions = False
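For context, here is a minimal sketch (not part of the commit) of how these settings fit together when driving a local Llamafile server through Open Interpreter. The "openai/" prefix matters because completions are routed through LiteLLM, which reads the provider prefix and treats the server at api_base as an OpenAI-compatible endpoint. The api_key value below is an assumption: LiteLLM's OpenAI route typically expects some key to be set, and a local server simply ignores it.

from interpreter import interpreter

# Point Open Interpreter at a llamafile server exposing an OpenAI-compatible API.
interpreter.llm.model = "openai/local"                 # provider prefix selects LiteLLM's OpenAI-compatible route
interpreter.llm.api_base = "http://localhost:8080/v1"  # llamafile's default local endpoint
interpreter.llm.api_key = "dummy"                      # assumed placeholder; local servers ignore the key
interpreter.llm.temperature = 0
interpreter.llm.supports_functions = False             # llamafile models don't expose OpenAI-style tool calling

interpreter.chat("List the files in the current directory.")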