|
1 | | -## Set to any of the following values: gemini, openai, watsonx |
2 | | -## You'll need to set corresponding APIKeys below to use the selected LLM |
| 1 | +################################################################################ |
| 2 | +# AGENTICS LLM CONFIGURATION |
| 3 | +################################################################################ |
| 4 | +# This file contains all the environment variables needed to configure LLM |
| 5 | +# providers for use with Agentics. All LLM providers are optional - configure |
| 6 | +# only the ones you need. The system will auto-discover which LLMs are available |
| 7 | +# based on the environment variables present. |
| 8 | +################################################################################ |
3 | 9 |
|
4 | | -SELECTED_LLM = "gemini" |
5 | 10 |
|
6 | | -## GEMINI |
| 11 | +################################################################################ |
| 12 | +# GEMINI (Optional) |
| 13 | +################################################################################ |
7 | 14 | GEMINI_API_KEY= |
8 | | -GEMINI_MODEL_ID= "gemini/gemini-2.0-flash" |
| 15 | +GEMINI_MODEL_ID="gemini/gemini-2.0-flash" |
9 | 16 |
|
10 | 17 |
|
11 | | -## Set one of the following LLM providers before using agentics |
12 | | -## WatsonX AI credentials (Optional) |
| 18 | +################################################################################ |
| 19 | +# OPENAI (Optional) |
| 20 | +################################################################################ |
| 21 | +OPENAI_API_KEY= |
| 22 | +OPENAI_MODEL_ID="openai/gpt-4" |
| 23 | + |
| 24 | + |
| 25 | +################################################################################ |
| 26 | +# OPENAI-COMPATIBLE LLMs (Optional) |
| 27 | +# |
| 28 | +# Configure OpenAI-compatible API providers with custom prefixes. |
| 29 | +# |
| 30 | +# Supported patterns: |
| 31 | +# - Default variant: OPENAI_COMPATIBLE_* |
| 32 | +# - Custom variants: OPENAI_COMPATIBLE_<VARIANT>_* |
| 33 | +# |
| 34 | +# Each variant requires three variables: |
| 35 | +# - <PREFIX>_API_KEY: Your API key |
| 36 | +# - <PREFIX>_MODEL_ID: The model identifier |
| 37 | +# - <PREFIX>_BASE_URL: The base URL of the API |
| 38 | +# |
| 39 | +# Examples: |
| 40 | +################################################################################ |
| 41 | + |
| 42 | +# Default OpenAI-compatible variant |
| 43 | +OPENAI_COMPATIBLE_API_KEY= |
| 44 | +OPENAI_COMPATIBLE_MODEL_ID= |
| 45 | +OPENAI_COMPATIBLE_BASE_URL= |
| 46 | + |
| 47 | + |
| 48 | +################################################################################ |
| 49 | +# WATSONX (Optional) |
| 50 | +################################################################################ |
13 | 51 | MODEL_ID=watsonx/meta-llama/llama-3-3-70b-instruct |
14 | | -WATSONX_URL=https://us-south.ml.cloud.ibm.com |
| 52 | +WATSONX_URL=https://us-south.ml.cloud.ibm.com |
15 | 53 | WATSONX_APIKEY= |
16 | 54 | WATSONX_PROJECTID= |
17 | 55 |
|
18 | | -## OpenAI credentials (Optional) |
19 | | -OPENAI_API_KEY= |
20 | | -OPENAI_MODEL_ID="openai/gpt-4" |
21 | 56 |
|
22 | | -## VLLM (Optional) |
23 | | -VLLM_URL=<http://base_url:PORT/v1> |
24 | | -VLLM_MODEL_ID="hosted_vllm/meta-llama/Llama-3.3-70B-Instruct" |
| 57 | +################################################################################ |
| 58 | +# VLLM - vLLM Server (Optional) |
| 59 | +# |
| 60 | +# vLLM is an open-source LLM serving framework. |
| 61 | +# Configure if you have a vLLM server running locally or remotely. |
| 62 | +################################################################################ |
| 63 | + |
| 64 | +# Required: vLLM server URL |
| 65 | +VLLM_URL=http://localhost:8000/v1 |
| 66 | + |
| 67 | +# Optional: Specific model ID for vLLM |
| 68 | +VLLM_MODEL_ID="meta-llama/Llama-3.3-70B-Instruct" |
| 69 | + |
| 70 | + |
| 71 | +################################################################################ |
| 72 | +# OLLAMA (Optional) |
| 73 | +# |
| 74 | +# Ollama is an open-source LLM runner that can run models locally. |
| 75 | +# Configure if you have Ollama installed and running. |
| 76 | +################################################################################ |
| 77 | + |
| 78 | +# The model to use with Ollama |
| 79 | +# OLLAMA_MODEL_ID="deepseek-r1:latest" |
| 80 | +# OLLAMA_MODEL_ID="llama2:latest" |
| 81 | +# OLLAMA_MODEL_ID="mistral:latest" |
| 82 | + |
| 83 | + |
| 84 | +################################################################################ |
| 85 | +# LITELLM (Optional) |
| 86 | +# |
| 87 | +# LiteLLM provides access to 100+ LLM providers through a unified interface. |
| 88 | +# Use model format: "litellm/provider/model-name" |
| 89 | +# See: https://docs.litellm.ai/docs/providers |
| 90 | +################################################################################ |
| 91 | + |
| 92 | +# Model in format: litellm/provider/model-name |
| 93 | +# LITELLM_MODEL="litellm/openai/gpt-4" |
| 94 | +# LITELLM_MODEL="litellm/anthropic/claude-3-sonnet" |
| 95 | +# LITELLM_MODEL="litellm/cohere/command-r" |
| 96 | + |
| 97 | +# Optional temperature and top_p settings |
| 98 | +# LITELLM_TEMPERATURE=0.8 |
| 99 | +# LITELLM_TOP_P=0.9 |
| 100 | + |
| 101 | + |
| 102 | +################################################################################ |
| 103 | +# LITELLM PROXY (Optional) |
| 104 | +# |
| 105 | +# LiteLLM Proxy allows you to run a local proxy server that manages API keys |
| 106 | +# and provides a unified interface for multiple LLM providers. In some |
| 107 | +# organizations this also provides a way to centralize frontier LLM |
| 108 | +# access for employees. |
| 109 | +# |
| 110 | +################################################################################ |
| 111 | + |
| 112 | +# Required: Proxy server URL (typically http://localhost:4000 if running locally, or |
| 113 | +# https://litellm.your.organization/, with no /v1 suffix) |
| 114 | +# LITELLM_PROXY_URL=http://localhost:4000 |
25 | 115 |
|
26 | | -## OLLAMA (Optional) |
27 | | -OLLAMA_MODEL_ID="ollama/deepseek-r1:latest" |
| 116 | +# Required: API key for proxy authentication |
| 117 | +# LITELLM_PROXY_API_KEY= |
28 | 118 |
|
| 119 | +################################################################################ |
| 120 | +# Models |
| 121 | +# Once you've defined the above environment variables, you can run this command |
| 122 | +# to check the available models. |
| 123 | +# uvx --from "litellm[proxy]" litellm-proxy models list |
| 124 | +# |
| 125 | +# This command will display all available models configured on your proxy. |
| 126 | +################################################################################ |
29 | 127 |
|
30 | | -#### CrewAI |
| 128 | +# Required: Model name in format: litellm_proxy/<model_name> |
| 129 | +# Must start with "litellm_proxy/" |
| 130 | +# LITELLM_PROXY_MODEL="litellm_proxy/gpt-4" |
| 131 | +# LITELLM_PROXY_MODEL="litellm_proxy/claude-3-sonnet" |
31 | 132 |
|
32 | | -CREWAI_DISABLE_TELEMETRY=true |
33 | | -CREWAI_DISABLE_TRACING=true |
34 | | -CREWAI_TELEMETRY=false |
35 | | -OTEL_SDK_DISABLED=true |
36 | | -CREWAI_TRACING_DISABLED=true |
37 | | -CREWAI_SILENT=true |
38 | | -AGENTICS_TRACE_MODE="off" |
| 133 | +# Optional temperature and top_p settings |
| 134 | +# LITELLM_PROXY_TEMPERATURE=0.8 |
| 135 | +# LITELLM_PROXY_TOP_P=0.9 |
0 commit comments