From f2b4b70f5a319bf7d059117033dc304333d303b9 Mon Sep 17 00:00:00 2001
From: LJ
Date: Sun, 22 Jun 2025 22:03:35 -0700
Subject: [PATCH] docs(litellm): revise to use examples not covered by other
 supported APIs

---
 docs/docs/ai/llm.mdx | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/docs/docs/ai/llm.mdx b/docs/docs/ai/llm.mdx
index 1df420aa7..804b50805 100644
--- a/docs/docs/ai/llm.mdx
+++ b/docs/docs/ai/llm.mdx
@@ -233,35 +233,35 @@ pip install 'litellm[proxy]'
 
 #### 2. Create a `config.yml` for LiteLLM
 
-**Example for OpenAI:**
+**Example for DeepSeek:**
+
+Use this in your `config.yml`:
+
 ```yaml
 model_list:
-  - model_name: "*"
+  - model_name: deepseek-chat
     litellm_params:
-      model: openai/*
-      api_key: os.environ/LITELLM_API_KEY
+      model: deepseek/deepseek-chat
+      api_key: os.environ/DEEPSEEK_API_KEY
 ```
 
-**Example for DeepSeek:**
+You need to set the environment variable `DEEPSEEK_API_KEY` to your DeepSeek API key.
 
-First, pull the DeepSeek model with Ollama:
-```bash
-ollama pull deepseek-r1
-```
-Then run it if it's not running:
-```bash
-ollama run deepseek-r1
-```
+**Example for Groq:**
+
+Use this in your `config.yml`:
 
-Then, use this in your `config.yml`:
 ```yaml
 model_list:
-  - model_name: "deepseek-r1"
+  - model_name: groq-llama-3.3-70b-versatile
     litellm_params:
-      model: "ollama_chat/deepseek-r1"
-      api_base: "http://localhost:11434"
+      model: groq/llama-3.3-70b-versatile
+      api_key: "os.environ/GROQ_API_KEY"
 ```
 
+You need to set the environment variable `GROQ_API_KEY` to your Groq API key.
+
+
 #### 3. Run LiteLLM Proxy
 
 ```bash
 litellm --config config.yml
 ```
@@ -276,7 +276,7 @@ litellm --config config.yml
 ```python
 cocoindex.LlmSpec(
     api_type=cocoindex.LlmApiType.LITE_LLM,
-    model="deepseek-r1",
+    model="deepseek-chat",
     address="http://127.0.0.1:4000", # default url of LiteLLM
 )
 ```
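
Note for reviewers: the revised examples can be sanity-checked outside CocoIndex, since the LiteLLM proxy exposes an OpenAI-compatible endpoint at the address used above. A minimal sketch, assuming the proxy from step 3 is running on its default port, the `openai` Python package is installed, and no LiteLLM master key is configured (the `api_key` value below is a placeholder, not a real credential):

```python
# Sketch: call the LiteLLM proxy's OpenAI-compatible endpoint directly.
# Assumes the proxy is at its default address (http://127.0.0.1:4000) and
# no master key is set, so api_key is just a placeholder the client requires.
from openai import OpenAI

client = OpenAI(
    base_url="http://127.0.0.1:4000",  # default url of LiteLLM, per the docs
    api_key="sk-placeholder",          # hypothetical value; ignored without a master key
)

response = client.chat.completions.create(
    model="deepseek-chat",  # must match a model_name entry in config.yml
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)
```

The `model` string here is the same one `cocoindex.LlmSpec` sends through, so a failure at this step points at the proxy config rather than the CocoIndex flow.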