diff --git a/litellm/integrations/gitlab/__init__.py b/litellm/integrations/gitlab/__init__.py
index cd22afc2ba05..4c4cf4d9e69f 100644
--- a/litellm/integrations/gitlab/__init__.py
+++ b/litellm/integrations/gitlab/__init__.py
@@ -8,7 +8,7 @@ from litellm.types.prompts.init_prompts import SupportedPromptIntegrations
 from litellm.integrations.custom_prompt_management import CustomPromptManagement
 from litellm.types.prompts.init_prompts import PromptSpec, PromptLiteLLMParams
 
-from .gitlab_prompt_manager import GitLabPromptManager
+from .gitlab_prompt_manager import GitLabPromptManager, GitLabPromptCache
 
 # Global instances
 global_gitlab_config: Optional[dict] = None
@@ -90,6 +90,7 @@ def _gitlab_prompt_initializer(
 # Export public API
 __all__ = [
     "GitLabPromptManager",
+    "GitLabPromptCache",
     "set_global_gitlab_config",
     "global_gitlab_config",
 ]
diff --git a/litellm/integrations/gitlab/gitlab_prompt_manager.py b/litellm/integrations/gitlab/gitlab_prompt_manager.py
index b782f10ccc51..a1cc6b2611f6 100644
--- a/litellm/integrations/gitlab/gitlab_prompt_manager.py
+++ b/litellm/integrations/gitlab/gitlab_prompt_manager.py
@@ -248,7 +248,7 @@ class GitLabPromptManager(CustomPromptManagement):
         "access_token": "glpat_***",
         "tag": "v1.2.3",        # optional; takes precedence
         "branch": "main",       # default fallback
-        "prompts_path": "prompts/chat"  # <--- NEW
+        "prompts_path": "prompts/chat"
     }
     """
@@ -486,3 +486,138 @@ def get_chat_completion_prompt(
             prompt_label,
             prompt_version,
         )
+
+
+class GitLabPromptCache:
+    """
+    Cache all .prompt files from a GitLab repo into memory.
+
+    - Keys are the *repo file paths* (e.g. "prompts/chat/greet/hi.prompt")
+      mapped to JSON-like dicts containing content + metadata.
+    - Also exposes a by-ID view (ID == path relative to prompts_path without ".prompt",
+      e.g. "greet/hi").
+
+    Usage:
+
+        cfg = {
+            "project": "group/subgroup/repo",
+            "access_token": "glpat_***",
+            "prompts_path": "prompts/chat",  # optional, can be empty for repo root
+            # "branch": "main",               # default is "main"
+            # "tag": "v1.2.3",                # takes precedence over branch
+            # "base_url": "https://gitlab.com/api/v4"  # default
+        }
+
+        cache = GitLabPromptCache(cfg)
+        cache.load_all()  # fetch + parse all .prompt files
+
+        print(cache.list_files())  # repo file paths
+        print(cache.list_ids())    # template IDs relative to prompts_path
+
+        prompt_json = cache.get_by_file("prompts/chat/greet/hi.prompt")
+        prompt_json2 = cache.get_by_id("greet/hi")
+
+        # If GitLab content changes and you want to refresh:
+        cache.reload()  # re-scan and refresh all
+    """
+
+    def __init__(
+        self,
+        gitlab_config: Dict[str, Any],
+        *,
+        ref: Optional[str] = None,
+        gitlab_client: Optional[GitLabClient] = None,
+    ) -> None:
+        # Build a PromptManager (which internally builds TemplateManager + Client)
+        self.prompt_manager = GitLabPromptManager(
+            gitlab_config=gitlab_config,
+            prompt_id=None,
+            ref=ref,
+            gitlab_client=gitlab_client,
+        )
+        self.template_manager: GitLabTemplateManager = self.prompt_manager.prompt_manager
+
+        # In-memory stores
+        self._by_file: Dict[str, Dict[str, Any]] = {}
+        self._by_id: Dict[str, Dict[str, Any]] = {}
+
+    # -------------------------
+    # Public API
+    # -------------------------
+
+    def load_all(self, *, recursive: bool = True) -> Dict[str, Dict[str, Any]]:
+        """
+        Scan GitLab for all .prompt files under prompts_path, load and parse each,
+        and return the mapping of repo file path -> JSON-like dict.
+        """
+        ids = self.template_manager.list_templates(recursive=recursive)  # IDs relative to prompts_path
+        for pid in ids:
+            # Ensure template is loaded into TemplateManager
+            if pid not in self.template_manager.prompts:
+                self.template_manager._load_prompt_from_gitlab(pid)
+
+            tmpl = self.template_manager.get_template(pid)
+            if tmpl is None:
+                # If something raced/failed, try once more
+                self.template_manager._load_prompt_from_gitlab(pid)
+                tmpl = self.template_manager.get_template(pid)
+                if tmpl is None:
+                    continue
+
+            file_path = self.template_manager._id_to_repo_path(pid)  # "prompts/chat/..../file.prompt"
+            entry = self._template_to_json(pid, tmpl)
+
+            self._by_file[file_path] = entry
+            self._by_id[pid] = entry
+
+        return self._by_id
+
+    def reload(self, *, recursive: bool = True) -> Dict[str, Dict[str, Any]]:
+        """Clear the cache and re-load from GitLab."""
+        self._by_file.clear()
+        self._by_id.clear()
+        return self.load_all(recursive=recursive)
+
+    def list_files(self) -> List[str]:
+        """Return the repo file paths currently cached."""
+        return list(self._by_file.keys())
+
+    def list_ids(self) -> List[str]:
+        """Return the template IDs (relative to prompts_path, without extension) currently cached."""
+        return list(self._by_id.keys())
+
+    def get_by_file(self, file_path: str) -> Optional[Dict[str, Any]]:
+        """Get a cached prompt JSON by repo file path."""
+        return self._by_file.get(file_path)
+
+    def get_by_id(self, prompt_id: str) -> Optional[Dict[str, Any]]:
+        """Get a cached prompt JSON by prompt ID (relative to prompts_path)."""
+        return self._by_id.get(prompt_id)
+
+    # -------------------------
+    # Internals
+    # -------------------------
+
+    def _template_to_json(self, prompt_id: str, tmpl: GitLabPromptTemplate) -> Dict[str, Any]:
+        """
+        Normalize a GitLabPromptTemplate into a JSON-like dict that is easy to serialize.
+        """
+        # Safer copy of metadata (avoid accidental mutation)
+        md = dict(tmpl.metadata or {})
+
+        # Pull standard fields (also present in metadata sometimes)
+        model = tmpl.model
+        temperature = tmpl.temperature
+        max_tokens = tmpl.max_tokens
+        optional_params = dict(tmpl.optional_params or {})
+
+        return {
+            "id": prompt_id,  # e.g. "greet/hi"
+            "path": self.template_manager._id_to_repo_path(prompt_id),  # e.g. "prompts/chat/greet/hi.prompt"
+            "content": tmpl.content,  # rendered content (without frontmatter)
+            "metadata": md,  # parsed frontmatter
+            "model": model,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+            "optional_params": optional_params,
+        }
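For a quick sanity check outside the proxy, the new cache can be exercised roughly like this (a minimal sketch based on the docstring above; the project path, token, and prompt id are placeholders, not values from this PR):

    import json
    from litellm.integrations.gitlab import GitLabPromptCache

    cfg = {
        "project": "group/subgroup/repo",   # placeholder
        "access_token": "glpat_***",        # placeholder
        "prompts_path": "prompts/chat",
    }

    cache = GitLabPromptCache(cfg)
    cache.load_all()                 # fetch + parse every .prompt file once
    print(cache.list_ids())          # e.g. ["greet/hi", ...]
    print(json.dumps(cache.get_by_id("greet/hi"), indent=2, default=str))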
"prompts/chat/greet/hi.prompt" + "content": tmpl.content, # rendered content (without frontmatter) + "metadata": md, # parsed frontmatter + "model": model, + "temperature": temperature, + "max_tokens": max_tokens, + "optional_params": optional_params, + } diff --git a/litellm/proxy/prompts/prompt_endpoints.py b/litellm/proxy/prompts/prompt_endpoints.py index 9ab63895ae7c..9345709d9585 100644 --- a/litellm/proxy/prompts/prompt_endpoints.py +++ b/litellm/proxy/prompts/prompt_endpoints.py @@ -77,7 +77,7 @@ async def list_prompts( ``` """ from litellm.proxy._types import LitellmUserRoles - from litellm.proxy.prompts.prompt_registry import IN_MEMORY_PROMPT_REGISTRY + from litellm.proxy.prompts.prompt_registry import PROMPT_HUB # check key metadata for prompts key_metadata = user_api_key_dict.metadata @@ -86,9 +86,9 @@ async def list_prompts( if prompts is not None: return ListPromptsResponse( prompts=[ - IN_MEMORY_PROMPT_REGISTRY.IN_MEMORY_PROMPTS[prompt] + PROMPT_HUB.IN_MEMORY_PROMPTS[prompt] for prompt in prompts - if prompt in IN_MEMORY_PROMPT_REGISTRY.IN_MEMORY_PROMPTS + if prompt in PROMPT_HUB.IN_MEMORY_PROMPTS ] ) # check if user is proxy admin - show all prompts @@ -97,7 +97,7 @@ async def list_prompts( or user_api_key_dict.user_role == LitellmUserRoles.PROXY_ADMIN.value ): return ListPromptsResponse( - prompts=list(IN_MEMORY_PROMPT_REGISTRY.IN_MEMORY_PROMPTS.values()) + prompts=list(PROMPT_HUB.IN_MEMORY_PROMPTS.values()) ) else: return ListPromptsResponse(prompts=[]) @@ -148,7 +148,7 @@ async def get_prompt_info( } ``` """ - from litellm.proxy.prompts.prompt_registry import IN_MEMORY_PROMPT_REGISTRY + from litellm.proxy.prompts.prompt_registry import PROMPT_HUB ## CHECK IF USER HAS ACCESS TO PROMPT prompts: Optional[List[str]] = None @@ -169,14 +169,19 @@ async def get_prompt_info( detail=f"You are not authorized to access this prompt. 
Your role - {user_api_key_dict.user_role}, Your key's prompts - {prompts}", ) - prompt_spec = IN_MEMORY_PROMPT_REGISTRY.get_prompt_by_id(prompt_id) + prompt_spec = PROMPT_HUB.get_prompt_by_id(prompt_id) + verbose_proxy_logger.debug(f"found prompt with id {prompt_id}-->{prompt_spec}") if prompt_spec is None: raise HTTPException(status_code=400, detail=f"Prompt {prompt_id} not found") # Get prompt content from the callback prompt_template: Optional[PromptTemplateBase] = None try: - prompt_callback = IN_MEMORY_PROMPT_REGISTRY.get_prompt_callback_by_id(prompt_id) + prompt_callback = PROMPT_HUB.get_prompt_callback_by_id(prompt_id) + verbose_proxy_logger.debug( + f"Found the prompt callback for prompt id {prompt_id} --> {prompt_callback}" + ) + if prompt_callback is not None: # Extract content based on integration type integration_name = prompt_callback.integration_name @@ -196,6 +201,20 @@ async def get_prompt_info( content=template[template_id]["content"], metadata=template[template_id]["metadata"], ) + if integration_name == "gitlab": + from litellm.integrations.gitlab import ( + GitLabPromptManager, + ) + if isinstance(prompt_callback, GitLabPromptManager): + template = prompt_callback.prompt_manager.get_all_prompts_as_json() + if template is not None and len(template) == 1: + template_id = list(template.keys())[0] + prompt_template = PromptTemplateBase( + litellm_prompt_id=template_id, # id sent to prompt management tool + content=template[template_id]["content"], + metadata=template[template_id]["metadata"], + ) + except Exception: # If content extraction fails, continue without content diff --git a/litellm/proxy/prompts/prompt_registry.py b/litellm/proxy/prompts/prompt_registry.py index b4717687704e..ec676b73d657 100644 --- a/litellm/proxy/prompts/prompt_registry.py +++ b/litellm/proxy/prompts/prompt_registry.py @@ -1,16 +1,19 @@ import importlib import os from pathlib import Path -from typing import Callable, Dict, Optional +from typing import Callable, Dict, Optional, List, Tuple, Iterable from litellm._logging import verbose_proxy_logger from litellm.integrations.custom_prompt_management import CustomPromptManagement +from litellm.integrations.gitlab import GitLabPromptCache +from collections import OrderedDict from litellm.types.prompts.init_prompts import ( PromptInfo, PromptLiteLLMParams, - PromptSpec, + PromptSpec ) + prompt_initializer_registry = {} @@ -175,4 +178,371 @@ def get_prompt_callback_by_id( return self.prompt_id_to_custom_prompt.get(prompt_id) -IN_MEMORY_PROMPT_REGISTRY = InMemoryPromptRegistry() \ No newline at end of file +IN_MEMORY_PROMPT_REGISTRY = InMemoryPromptRegistry() + + +class GitlabPromptRegistry: + """ + Class that handles adding prompt callbacks to the CallbacksManager. 
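Note on the gitlab branch above: it assumes get_all_prompts_as_json() returns a mapping shaped like the _template_to_json() output added in gitlab_prompt_manager.py, i.e. something like (illustrative values only):

    {
        "greet/hi": {
            "id": "greet/hi",
            "path": "prompts/chat/greet/hi.prompt",
            "content": "Hi {{name}}!",
            "metadata": {"model": "gpt-4o"},
            "model": "gpt-4o",
            "temperature": None,
            "max_tokens": None,
            "optional_params": {},
        }
    }

PromptTemplateBase is then populated from the single entry's "content" and "metadata" keys.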
diff --git a/litellm/proxy/prompts/prompt_registry.py b/litellm/proxy/prompts/prompt_registry.py
index b4717687704e..ec676b73d657 100644
--- a/litellm/proxy/prompts/prompt_registry.py
+++ b/litellm/proxy/prompts/prompt_registry.py
@@ -1,16 +1,19 @@
 import importlib
 import os
 from pathlib import Path
-from typing import Callable, Dict, Optional
+from typing import Callable, Dict, Optional, List, Tuple, Iterable
 
 from litellm._logging import verbose_proxy_logger
 from litellm.integrations.custom_prompt_management import CustomPromptManagement
+from litellm.integrations.gitlab import GitLabPromptCache
+from collections import OrderedDict
 from litellm.types.prompts.init_prompts import (
     PromptInfo,
     PromptLiteLLMParams,
-    PromptSpec,
+    PromptSpec
 )
+
 
 prompt_initializer_registry = {}
@@ -175,4 +178,371 @@ def get_prompt_callback_by_id(
         return self.prompt_id_to_custom_prompt.get(prompt_id)
 
 
-IN_MEMORY_PROMPT_REGISTRY = InMemoryPromptRegistry()
\ No newline at end of file
+IN_MEMORY_PROMPT_REGISTRY = InMemoryPromptRegistry()
+
+
+class GitlabPromptRegistry:
+    """
+    Registry that loads .prompt files from GitLab (via GitLabPromptCache) and
+    registers the resulting prompt callbacks with the litellm callback manager.
+    """
+
+    def __init__(self):
+        self.gitlab_prompt_cache: Optional[GitLabPromptCache] = None
+
+        self.IN_MEMORY_PROMPTS: Dict[str, PromptSpec] = {}
+        """
+        Prompt id to Prompt object mapping
+        """
+
+        self.prompt_id_to_custom_prompt: Dict[str, Optional[CustomPromptManagement]] = (
+            {}
+        )
+        """
+        Prompt id to CustomPromptManagement object mapping
+        """
+
+    def load_all(self) -> Dict[str, PromptSpec]:
+        if not self.gitlab_prompt_cache:
+            import litellm
+
+            self.gitlab_prompt_cache = GitLabPromptCache(litellm.global_gitlab_config)
+
+        prompts_dict = self.gitlab_prompt_cache.load_all()
+        for prompt_id, prompt_json in prompts_dict.items():
+            verbose_proxy_logger.debug(f"{prompt_id} --> {prompt_json}")
+
+            prompt_info: PromptInfo = PromptInfo(
+                prompt_type="config",
+                model_config={
+                    "content": prompt_json.get("content"),
+                    "metadata": prompt_json.get("metadata"),
+                },
+            )
+            prompt_params: PromptLiteLLMParams = PromptLiteLLMParams(
+                prompt_id=prompt_id,
+                prompt_integration="gitlab",
+                model_config={
+                    "content": prompt_json.get("content"),
+                    "metadata": prompt_json.get("metadata"),
+                },
+            )
+            prompt_spec: PromptSpec = PromptSpec(
+                prompt_id=prompt_id,
+                litellm_params=prompt_params,
+                prompt_info=prompt_info,
+            )
+            # initialize_prompt stores the spec + callback in memory
+            self.initialize_prompt(prompt_spec)
+
+        verbose_proxy_logger.debug(
+            f"found the gitlab prompts with these ids {list(set(self.IN_MEMORY_PROMPTS.keys()))}"
+        )
+        return self.IN_MEMORY_PROMPTS
+
+    def gitlab_prompt_id_to_litellm_prompt_id(self):
+        pass
+
+    def initialize_prompt(
+        self,
+        prompt: PromptSpec,
+        config_file_path: Optional[str] = None,
+    ) -> Optional[PromptSpec]:
+        """
+        Initialize a prompt from a PromptSpec and add it to the litellm callback manager.
+
+        Returns the parsed PromptSpec if the prompt is initialized successfully.
+        """
+        import litellm
+
+        prompt_id = prompt.prompt_id
+        if prompt_id in self.IN_MEMORY_PROMPTS:
+            verbose_proxy_logger.debug("prompt_id already exists in IN_MEMORY_PROMPTS")
+            return self.IN_MEMORY_PROMPTS[prompt_id]
+
+        custom_prompt_callback: Optional[CustomPromptManagement] = None
+        litellm_params_data = prompt.litellm_params
+        verbose_proxy_logger.debug("litellm_params= %s", litellm_params_data)
+
+        if isinstance(litellm_params_data, dict):
+            litellm_params = PromptLiteLLMParams(**litellm_params_data)
+        else:
+            litellm_params = litellm_params_data
+
+        prompt_integration = litellm_params.prompt_integration
+        if prompt_integration is None:
+            raise ValueError("prompt_integration is required")
+        initializer = prompt_initializer_registry.get(prompt_integration)
+
+        if initializer:
+            custom_prompt_callback = initializer(litellm_params, prompt)
+            if not isinstance(custom_prompt_callback, CustomPromptManagement):
+                raise ValueError(
+                    f"CustomPromptManagement is required, got {type(custom_prompt_callback)}"
+                )
+            litellm.logging_callback_manager.add_litellm_callback(custom_prompt_callback)  # type: ignore
+        else:
+            raise ValueError(f"Unsupported prompt integration: {prompt_integration}")
+
+        parsed_prompt = PromptSpec(
+            prompt_id=prompt_id,
+            litellm_params=litellm_params,
+            prompt_info=prompt.prompt_info or PromptInfo(prompt_type="config"),
+            created_at=prompt.created_at,
+            updated_at=prompt.updated_at,
+        )
+
+        # store references to the prompt in memory
+        self.IN_MEMORY_PROMPTS[prompt_id] = parsed_prompt
+        self.prompt_id_to_custom_prompt[prompt_id] = custom_prompt_callback
+
+        return parsed_prompt
+
+    def get_prompt_by_id(self, prompt_id: str) -> Optional[PromptSpec]:
+        """
+        Get a prompt by its ID from memory
+        """
+        return self.IN_MEMORY_PROMPTS.get(prompt_id)
+
+    def get_prompt_callback_by_id(
+        self, prompt_id: str
+    ) -> Optional[CustomPromptManagement]:
+        """
+        Get a prompt callback by its ID from memory
+        """
+        return self.prompt_id_to_custom_prompt.get(prompt_id)
+
+
+GITLAB_PROMPT_REGISTRY = GitlabPromptRegistry()
+
+
+class UnifiedPromptRegistry:
+    """
+    Aggregate multiple prompt registries behind one interface.
+
+    Exposes:
+      - IN_MEMORY_PROMPTS: aggregated view (precedence-aware) of PromptSpecs
+
+    Registry contract (best-effort; we detect what exists):
+      - .IN_MEMORY_PROMPTS: Dict[str, PromptSpec]  (preferred for fast indexing)
+      - .get_prompt_by_id(prompt_id) -> Optional[PromptSpec]
+      - .get_prompt_callback_by_id(prompt_id) -> Optional[CustomPromptManagement]
+      - .initialize_prompt(prompt: PromptSpec, config_file_path: Optional[str] = None)
+      - .load_all()  (optional)
+    """
+
+    def __init__(self) -> None:
+        # Precedence-preserving container
+        self._registries: "OrderedDict[str, object]" = OrderedDict()
+        self._integration_to_registry: Dict[str, str] = {}
+        # Aggregated, precedence-aware cache
+        self.IN_MEMORY_PROMPTS: Dict[str, PromptSpec] = {}
+
+    # -----------------------------
+    # Wiring & setup
+    # -----------------------------
+
+    def register_registry(self, name: str, registry: object) -> None:
+        """Add/replace a registry in search order, then rebuild the aggregate cache."""
+        self._registries[name] = registry
+        verbose_proxy_logger.debug("UnifiedPromptRegistry: registered %s", name)
+
+        lname = name.lower()
+        if "gitlab" in lname:
+            self._integration_to_registry.setdefault("gitlab", name)
+        if "memory" in lname or "in_memory" in lname:
+            self._integration_to_registry.setdefault("in_memory", name)
+
+        self._rebuild_cache()
+
+    def set_integration_route(self, prompt_integration: str, registry_name: str) -> None:
+        if registry_name not in self._registries:
+            raise ValueError(f"Unknown registry '{registry_name}'")
+        self._integration_to_registry[prompt_integration] = registry_name
+
+    # -----------------------------
+    # Bulk preload / listing
+    # -----------------------------
+
+    def load_all(self) -> Dict[str, PromptSpec]:
+        """Ask each registry to preload if supported, then refresh the aggregate cache."""
+        for name, reg in self._registries.items():
+            if hasattr(reg, "load_all"):
+                try:
+                    reg.load_all()  # type: ignore[attr-defined]
+                    verbose_proxy_logger.debug("UnifiedPromptRegistry: %s.load_all() OK", name)
+                except Exception as e:
+                    verbose_proxy_logger.debug("UnifiedPromptRegistry: %s.load_all() failed: %s", name, e)
+        self._rebuild_cache()
+        return self.IN_MEMORY_PROMPTS
+
+    def list_prompt_ids(self) -> List[str]:
+        """IDs from the aggregated cache (precedence already applied)."""
+        return list(self.IN_MEMORY_PROMPTS.keys())
+
+    def list_prompts(self) -> List[PromptSpec]:
+        """PromptSpecs from the aggregated cache."""
+        return list(self.IN_MEMORY_PROMPTS.values())
+
+    # -----------------------------
+    # Lookups
+    # -----------------------------
+
+    def get_prompt_by_id(self, prompt_id: str) -> Optional[PromptSpec]:
+        """Fast path via aggregated cache; fallback to registries and update cache on hit."""
+        spec = self.IN_MEMORY_PROMPTS.get(prompt_id)
+        verbose_proxy_logger.debug(f"Found spec for {prompt_id} --> {spec}")
+        if spec is not None:
+            return spec
+
+        # Fallback: search registries in order; on success, cache it
+        for _, reg in self._registries.items():
+            # Fast dict check
+            im = getattr(reg, "IN_MEMORY_PROMPTS", None)
+            if isinstance(im, dict) and prompt_id in im:
+                self._cache_if_absent(prompt_id, im[prompt_id])
+                return im[prompt_id]
+            # Accessor
+            if hasattr(reg, "get_prompt_by_id"):
+                found = reg.get_prompt_by_id(prompt_id)  # type: ignore[attr-defined]
+                if found is not None:
+                    self._cache_if_absent(prompt_id, found)
+                    return found
+        return None
+
+    def get_prompt_callback_by_id(self, prompt_id: str) -> Optional[CustomPromptManagement]:
+        """Lookup callback by searching registries in precedence order."""
+        for _, reg in self._registries.items():
+            if hasattr(reg, "get_prompt_callback_by_id"):
+                cb = reg.get_prompt_callback_by_id(prompt_id)  # type: ignore[attr-defined]
+                if cb is not None:
+                    return cb
+        return None
+
+    def find_with_origin(self, prompt_id: str) -> Optional[Tuple[str, PromptSpec]]:
+        """Return (registry_name, PromptSpec) for the first match in precedence order."""
+        # Try aggregate cache first; if present, identify origin with a pass through registries
+        spec = self.IN_MEMORY_PROMPTS.get(prompt_id)
+        if spec is not None:
+            for name, reg in self._registries.items():
+                im = getattr(reg, "IN_MEMORY_PROMPTS", None)
+                if isinstance(im, dict) and prompt_id in im:
+                    return name, im[prompt_id]
+                if hasattr(reg, "get_prompt_by_id"):
+                    found = reg.get_prompt_by_id(prompt_id)  # type: ignore[attr-defined]
+                    if found is spec:
+                        return name, found
+            # Fallback: unknown origin but we do have the spec
+            return "", spec
+
+        # Not in cache: search registries
+        for name, reg in self._registries.items():
+            im = getattr(reg, "IN_MEMORY_PROMPTS", None)
+            if isinstance(im, dict) and prompt_id in im:
+                self._cache_if_absent(prompt_id, im[prompt_id])
+                return name, im[prompt_id]
+            if hasattr(reg, "get_prompt_by_id"):
+                found = reg.get_prompt_by_id(prompt_id)  # type: ignore[attr-defined]
+                if found is not None:
+                    self._cache_if_absent(prompt_id, found)
+                    return name, found
+        return None
+
+    # -----------------------------
+    # Initialization routing
+    # -----------------------------
+
+    def initialize_prompt(self, prompt: PromptSpec, config_file_path: Optional[str] = None) -> Optional[PromptSpec]:
+        """
+        Route initialization to the correct underlying registry based on
+        prompt.litellm_params.prompt_integration, then refresh the cache for that id.
+        """
+        integration = None
+        try:
+            lp = prompt.litellm_params
+            integration = getattr(lp, "prompt_integration", None)
+            if integration is None and isinstance(lp, dict):
+                integration = lp.get("prompt_integration")
+        except Exception:
+            pass
+
+        target_name = self._integration_to_registry.get(str(integration).lower()) if integration else None
+        target_registry = self._registries.get(target_name) if target_name else next(iter(self._registries.values()), None)
+
+        if target_registry is None:
+            raise RuntimeError("UnifiedPromptRegistry has no registered registries to initialize the prompt.")
+        if not hasattr(target_registry, "initialize_prompt"):
+            raise RuntimeError(f"Registry '{target_name or 'UNKNOWN'}' does not support initialize_prompt().")
+
+        initialized = target_registry.initialize_prompt(prompt, config_file_path)  # type: ignore[attr-defined]
+
+        # Update aggregate cache just for this id (avoid full rebuild)
+        if initialized is not None:
+            self._cache_replace(prompt.prompt_id, initialized)
+
+        return initialized
+
+    # -----------------------------
+    # Cache management
+    # -----------------------------
+
+    def refresh(self) -> None:
+        """Public method to rebuild the aggregate cache on demand."""
+        self._rebuild_cache()
+
+    def _rebuild_cache(self) -> None:
+        """Rebuild aggregated IN_MEMORY_PROMPTS respecting registry precedence."""
+        agg: Dict[str, PromptSpec] = {}
+        seen: set = set()
+
+        for _, reg in self._registries.items():
+            # Prefer direct dict for speed
+            im = getattr(reg, "IN_MEMORY_PROMPTS", None)
+            if isinstance(im, dict):
+                for pid, spec in im.items():
+                    if pid not in seen:
+                        agg[pid] = spec
+                        seen.add(pid)
+                continue
+
+            # Fallback: if no dict, try to iterate via list of ids from accessor
+            ids: Iterable[str] = []
+            if hasattr(reg, "list_prompt_ids"):
+                try:
+                    ids = reg.list_prompt_ids()  # type: ignore[attr-defined]
+                except Exception:
+                    ids = []
+            for pid in ids:
+                if pid in seen:
+                    continue
+                spec = None
+                if hasattr(reg, "get_prompt_by_id"):
+                    spec = reg.get_prompt_by_id(pid)  # type: ignore[attr-defined]
+                if spec is not None:
+                    agg[pid] = spec
+                    seen.add(pid)
+
+        self.IN_MEMORY_PROMPTS = agg
+
+    def _cache_if_absent(self, prompt_id: str, spec: PromptSpec) -> None:
+        """Add to aggregate cache if not present (without breaking precedence)."""
+        if prompt_id not in self.IN_MEMORY_PROMPTS:
+            self.IN_MEMORY_PROMPTS[prompt_id] = spec
+
+    def _cache_replace(self, prompt_id: str, spec: PromptSpec) -> None:
+        """
+        Replace/insert a single PromptSpec in the aggregate cache.
+        Safe after initializing via the correct backend.
+        """
+        self.IN_MEMORY_PROMPTS[prompt_id] = spec
+
+
+PROMPT_HUB = UnifiedPromptRegistry()
+PROMPT_HUB.register_registry("in_memory", IN_MEMORY_PROMPT_REGISTRY)
+PROMPT_HUB.register_registry("gitlab", GITLAB_PROMPT_REGISTRY)
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 55e4a96b5b01..c5f28f5cf9a0 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -3028,6 +3028,7 @@ async def _init_non_llm_objects_in_db(self, prisma_client: PrismaClient):
         await self._init_mcp_servers_in_db()
         await self._init_pass_through_endpoints_in_db()
         await self._init_prompts_in_db(prisma_client=prisma_client)
+        await self._init_unified_prompts()
         await self._check_and_reload_model_cost_map(prisma_client=prisma_client)
 
     async def _check_and_reload_model_cost_map(self, prisma_client: PrismaClient):
@@ -3145,6 +3146,19 @@ async def _init_prompts_in_db(self, prisma_client: PrismaClient):
             )
         )
 
+    async def _init_unified_prompts(self):
+        from litellm.proxy.prompts.prompt_registry import PROMPT_HUB
+
+        try:
+            prompts = PROMPT_HUB.load_all()
+            verbose_proxy_logger.debug(
+                f"found these prompts {list(prompts.keys())}"
+            )
+        except Exception as e:
+            verbose_proxy_logger.debug(
+                f"litellm.proxy.proxy_server.py::ProxyConfig:_init_unified_prompts - {str(e)}"
+            )
+
     async def _init_guardrails_in_db(self, prisma_client: PrismaClient):
         from litellm.proxy.guardrails.guardrail_registry import (
             IN_MEMORY_GUARDRAIL_HANDLER,
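End-to-end, the intended flow of the pieces above is roughly the following (a sketch; method names come from this diff, the prompt id is a placeholder):

    from litellm.proxy.prompts.prompt_registry import PROMPT_HUB

    # On proxy startup, _init_unified_prompts() effectively does this: preload every
    # backing registry (the gitlab one pulls .prompt files via GitLabPromptCache)
    # and rebuild the aggregated cache.
    PROMPT_HUB.load_all()

    spec = PROMPT_HUB.get_prompt_by_id("greet/hi")              # placeholder id
    callback = PROMPT_HUB.get_prompt_callback_by_id("greet/hi")
    origin = PROMPT_HUB.find_with_origin("greet/hi")            # e.g. ("gitlab", spec)

    # Extra registries can be wired the same way the module does at import time:
    # PROMPT_HUB.register_registry("my_registry", my_registry)
    # PROMPT_HUB.set_integration_route("my_integration", "my_registry")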