Skip to content

Commit 5c0a4c9

Browse files
authored
feat(ollama): Add API key support for Ollama Cloud (#2278)
Add support for OLLAMA.API_KEY configuration to enable authentication with Ollama Cloud (ollama.com). Previously only local Ollama instances were supported without authentication.

- Pass api_key to litellm completion calls when configured
- Update secrets template with documentation for the new api_key field
- Clarify api_base comment to distinguish between Ollama Cloud and local
1 parent 42d55d4 commit 5c0a4c9

File tree

2 files changed

+6
-1
lines changed

2 files changed

+6
-1
lines changed

pr_agent/algo/ai_handlers/litellm_ai_handler.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -82,6 +82,8 @@ def __init__(self):
8282
if get_settings().get("OLLAMA.API_BASE", None):
8383
litellm.api_base = get_settings().ollama.api_base
8484
self.api_base = get_settings().ollama.api_base
85+
if get_settings().get("OLLAMA.API_KEY", None):
86+
litellm.api_key = get_settings().ollama.api_key
8587
if get_settings().get("HUGGINGFACE.REPETITION_PENALTY", None):
8688
self.repetition_penalty = float(get_settings().huggingface.repetition_penalty)
8789
if get_settings().get("VERTEXAI.VERTEX_PROJECT", None):
@@ -404,6 +406,8 @@ async def chat_completion(self, model: str, system: str, user: str, temperature:
404406
get_logger().info(f"\nSystem prompt:\n{system}")
405407
get_logger().info(f"\nUser prompt:\n{user}")
406408

409+
kwargs["api_key"] = litellm.api_key
410+
407411
# Get completion with automatic streaming detection
408412
resp, finish_reason, response_obj = await self._get_completion(**kwargs)
409413

pr_agent/settings/.secrets_template.toml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,8 @@ key = "" # Optional, uncomment if you want to use Huggingface Inference API. Acq
5050
api_base = "" # the base url for your huggingface inference endpoint
5151

5252
[ollama]
53-
api_base = "" # the base url for your local Llama 2, Code Llama, and other models inference endpoint. Acquire through https://ollama.ai/
53+
api_base = "" # the base url for your Ollama endpoint, e.g. https://ollama.com for Ollama Cloud or http://localhost:11434 for local
54+
api_key = "" # required for Ollama Cloud (ollama.com); leave empty for local Ollama
5455

5556
[vertexai]
5657
vertex_project = "" # the google cloud platform project name for your vertexai deployment

0 commit comments

Comments
 (0)