diff --git a/.pylintrc b/.pylintrc
index e4b355bb..ca1827b6 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -575,7 +575,7 @@ contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
-generated-members=
+generated-members=sqlalchemy.func.*
# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
diff --git a/MANIFEST.in b/MANIFEST.in
index 95f430a1..8bf2ca97 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -13,6 +13,7 @@ include Containerfile.lite
include __init__
include alembic.ini
include tox.ini
+include alembic/README
# 2️⃣ Top-level config, examples and helper scripts
include *.py
diff --git a/mcpgateway/admin.py b/mcpgateway/admin.py
index 8cc8a360..9f88f304 100644
--- a/mcpgateway/admin.py
+++ b/mcpgateway/admin.py
@@ -3975,118 +3975,81 @@ async def admin_delete_root(uri: str, request: Request, user: str = Depends(requ
MetricsDict = Dict[str, Union[ToolMetrics, ResourceMetrics, ServerMetrics, PromptMetrics]]
-@admin_router.get("/metrics", response_model=MetricsDict)
-async def admin_get_metrics(
+# @admin_router.get("/metrics", response_model=MetricsDict)
+# async def admin_get_metrics(
+# db: Session = Depends(get_db),
+# user: str = Depends(require_auth),
+# ) -> MetricsDict:
+# """
+# Retrieve aggregate metrics for all entity types via the admin UI.
+
+# This endpoint collects and returns usage metrics for tools, resources, servers,
+# and prompts. The metrics are retrieved by calling the aggregate_metrics method
+# on each respective service, which compiles statistics about usage patterns,
+# success rates, and other relevant metrics for administrative monitoring
+# and analysis purposes.
+
+# Args:
+# db (Session): Database session dependency.
+# user (str): Authenticated user dependency.
+
+# Returns:
+# MetricsDict: A dictionary containing the aggregated metrics for tools,
+# resources, servers, and prompts. Each value is a Pydantic model instance
+# specific to the entity type.
+# """
+# logger.debug(f"User {user} requested aggregate metrics")
+# tool_metrics = await tool_service.aggregate_metrics(db)
+# resource_metrics = await resource_service.aggregate_metrics(db)
+# server_metrics = await server_service.aggregate_metrics(db)
+# prompt_metrics = await prompt_service.aggregate_metrics(db)
+
+# # Return actual Pydantic model instances
+# return {
+# "tools": tool_metrics,
+# "resources": resource_metrics,
+# "servers": server_metrics,
+# "prompts": prompt_metrics,
+# }
+
+
+@admin_router.get("/metrics")
+async def get_aggregated_metrics(
db: Session = Depends(get_db),
- user: str = Depends(require_auth),
-) -> MetricsDict:
- """
- Retrieve aggregate metrics for all entity types via the admin UI.
+ _user: str = Depends(require_auth),
+) -> Dict[str, Any]:
+ """Retrieve aggregated metrics and top performers for all entity types.
- This endpoint collects and returns usage metrics for tools, resources, servers,
- and prompts. The metrics are retrieved by calling the aggregate_metrics method
- on each respective service, which compiles statistics about usage patterns,
- success rates, and other relevant metrics for administrative monitoring
- and analysis purposes.
+ This endpoint collects usage metrics and top-performing entities for tools,
+ resources, prompts, and servers by calling the respective service methods.
+ The results are compiled into a dictionary for administrative monitoring.
Args:
- db (Session): Database session dependency.
- user (str): Authenticated user dependency.
+ db (Session): Database session dependency for querying metrics.
Returns:
- MetricsDict: A dictionary containing the aggregated metrics for tools,
- resources, servers, and prompts. Each value is a Pydantic model instance
- specific to the entity type.
-
- Examples:
- >>> import asyncio
- >>> from unittest.mock import AsyncMock, MagicMock
- >>> from mcpgateway.schemas import ToolMetrics, ResourceMetrics, ServerMetrics, PromptMetrics
- >>>
- >>> mock_db = MagicMock()
- >>> mock_user = "test_user"
- >>>
- >>> mock_tool_metrics = ToolMetrics(
- ... total_executions=10,
- ... successful_executions=9,
- ... failed_executions=1,
- ... failure_rate=0.1,
- ... min_response_time=0.05,
- ... max_response_time=1.0,
- ... avg_response_time=0.3,
- ... last_execution_time=None
- ... )
- >>> mock_resource_metrics = ResourceMetrics(
- ... total_executions=5,
- ... successful_executions=5,
- ... failed_executions=0,
- ... failure_rate=0.0,
- ... min_response_time=0.1,
- ... max_response_time=0.5,
- ... avg_response_time=0.2,
- ... last_execution_time=None
- ... )
- >>> mock_server_metrics = ServerMetrics(
- ... total_executions=7,
- ... successful_executions=7,
- ... failed_executions=0,
- ... failure_rate=0.0,
- ... min_response_time=0.2,
- ... max_response_time=0.7,
- ... avg_response_time=0.4,
- ... last_execution_time=None
- ... )
- >>> mock_prompt_metrics = PromptMetrics(
- ... total_executions=3,
- ... successful_executions=3,
- ... failed_executions=0,
- ... failure_rate=0.0,
- ... min_response_time=0.15,
- ... max_response_time=0.6,
- ... avg_response_time=0.35,
- ... last_execution_time=None
- ... )
- >>>
- >>> original_aggregate_metrics_tool = tool_service.aggregate_metrics
- >>> original_aggregate_metrics_resource = resource_service.aggregate_metrics
- >>> original_aggregate_metrics_server = server_service.aggregate_metrics
- >>> original_aggregate_metrics_prompt = prompt_service.aggregate_metrics
- >>>
- >>> tool_service.aggregate_metrics = AsyncMock(return_value=mock_tool_metrics)
- >>> resource_service.aggregate_metrics = AsyncMock(return_value=mock_resource_metrics)
- >>> server_service.aggregate_metrics = AsyncMock(return_value=mock_server_metrics)
- >>> prompt_service.aggregate_metrics = AsyncMock(return_value=mock_prompt_metrics)
- >>>
- >>> async def test_admin_get_metrics():
- ... result = await admin_get_metrics(mock_db, mock_user)
- ... return (
- ... isinstance(result, dict) and
- ... result.get("tools") == mock_tool_metrics and
- ... result.get("resources") == mock_resource_metrics and
- ... result.get("servers") == mock_server_metrics and
- ... result.get("prompts") == mock_prompt_metrics
- ... )
- >>>
- >>> import asyncio; asyncio.run(test_admin_get_metrics())
- True
- >>>
- >>> tool_service.aggregate_metrics = original_aggregate_metrics_tool
- >>> resource_service.aggregate_metrics = original_aggregate_metrics_resource
- >>> server_service.aggregate_metrics = original_aggregate_metrics_server
- >>> prompt_service.aggregate_metrics = original_aggregate_metrics_prompt
+ Dict[str, Any]: A dictionary containing aggregated metrics and top performers
+ for tools, resources, prompts, and servers. The structure includes:
+ - 'tools': Metrics for tools.
+ - 'resources': Metrics for resources.
+ - 'prompts': Metrics for prompts.
+ - 'servers': Metrics for servers.
+ - 'topPerformers': A nested dictionary with top 5 tools, resources, prompts,
+ and servers.
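+
+        Example top-level structure (illustrative only; the exact metric fields
+        come from each service's aggregate_metrics result):
+
+            {
+                "tools": <aggregated tool metrics>,
+                "resources": <aggregated resource metrics>,
+                "prompts": <aggregated prompt metrics>,
+                "servers": <aggregated server metrics>,
+                "topPerformers": {
+                    "tools": [<TopPerformer>, ...],
+                    "resources": [<TopPerformer>, ...],
+                    "prompts": [<TopPerformer>, ...],
+                    "servers": [<TopPerformer>, ...],
+                },
+            }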
"""
- logger.debug(f"User {user} requested aggregate metrics")
- tool_metrics = await tool_service.aggregate_metrics(db)
- resource_metrics = await resource_service.aggregate_metrics(db)
- server_metrics = await server_service.aggregate_metrics(db)
- prompt_metrics = await prompt_service.aggregate_metrics(db)
-
- return {
- "tools": tool_metrics,
- "resources": resource_metrics,
- "servers": server_metrics,
- "prompts": prompt_metrics,
+ metrics = {
+ "tools": await tool_service.aggregate_metrics(db),
+ "resources": await resource_service.aggregate_metrics(db),
+ "prompts": await prompt_service.aggregate_metrics(db),
+ "servers": await server_service.aggregate_metrics(db),
+ "topPerformers": {
+ "tools": await tool_service.get_top_tools(db, limit=5),
+ "resources": await resource_service.get_top_resources(db, limit=5),
+ "prompts": await prompt_service.get_top_prompts(db, limit=5),
+ "servers": await server_service.get_top_servers(db, limit=5),
+ },
}
+ return metrics
@admin_router.post("/metrics/reset", response_model=Dict[str, object])
diff --git a/mcpgateway/main.py b/mcpgateway/main.py
index af6ddab7..7e5ca752 100644
--- a/mcpgateway/main.py
+++ b/mcpgateway/main.py
@@ -29,6 +29,7 @@
import asyncio
from contextlib import asynccontextmanager
import json
+import time
from typing import Any, AsyncIterator, Dict, List, Optional, Union
from urllib.parse import urlparse, urlunparse
@@ -52,7 +53,7 @@
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import ValidationError
-from sqlalchemy import text
+from sqlalchemy import select, text
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from starlette.middleware.base import BaseHTTPMiddleware
@@ -64,7 +65,8 @@
from mcpgateway.bootstrap_db import main as bootstrap_db
from mcpgateway.cache import ResourceCache, SessionRegistry
from mcpgateway.config import jsonpath_modifier, settings
-from mcpgateway.db import refresh_slugs_on_startup, SessionLocal
+from mcpgateway.db import Prompt as DbPrompt
+from mcpgateway.db import PromptMetric, refresh_slugs_on_startup, SessionLocal
from mcpgateway.handlers.sampling import SamplingHandler
from mcpgateway.models import (
InitializeRequest,
@@ -1780,17 +1782,49 @@ async def get_prompt(
Returns:
Rendered prompt or metadata.
+
+ Raises:
+ Exception: Re-raised if not a handled exception type.
"""
logger.debug(f"User: {user} requested prompt: {name} with args={args}")
+ start_time = time.monotonic()
+ success = False
+ error_message = None
+ result = None
+
try:
PromptExecuteArgs(args=args)
- return await prompt_service.get_prompt(db, name, args)
+ result = await prompt_service.get_prompt(db, name, args)
+ success = True
+ logger.debug(f"Prompt execution successful for '{name}'")
except Exception as ex:
+ error_message = str(ex)
logger.error(f"Could not retrieve prompt {name}: {ex}")
if isinstance(ex, (ValueError, PromptError)):
- return JSONResponse(content={"message": "Prompt execution arguments contains HTML tags that may cause security issues"}, status_code=422)
- if isinstance(ex, PluginViolationError):
- return JSONResponse(content={"message": "Prompt execution arguments contains HTML tags that may cause security issues", "details": ex.message}, status_code=422)
+ result = JSONResponse(content={"message": "Prompt execution arguments contains HTML tags that may cause security issues"}, status_code=422)
+ elif isinstance(ex, PluginViolationError):
+ result = JSONResponse(content={"message": "Prompt execution arguments contains HTML tags that may cause security issues", "details": ex.message}, status_code=422)
+ else:
+ raise
+
+    # Record metrics outside the try/except so they are written for both success and handled errors
+ end_time = time.monotonic()
+ response_time = end_time - start_time
+
+ # Get the prompt from database to get its ID
+ prompt = db.execute(select(DbPrompt).where(DbPrompt.name == name)).scalar_one_or_none()
+
+ if prompt:
+ metric = PromptMetric(
+ prompt_id=prompt.id,
+ response_time=response_time,
+ is_success=success,
+ error_message=error_message,
+ )
+ db.add(metric)
+ db.commit()
+
+ return result
@prompt_router.get("/{name}")
@@ -1810,9 +1844,41 @@ async def get_prompt_no_args(
Returns:
The prompt template information
+
+ Raises:
+ Exception: Re-raised from prompt service.
"""
logger.debug(f"User: {user} requested prompt: {name} with no arguments")
- return await prompt_service.get_prompt(db, name, {})
+ start_time = time.monotonic()
+ success = False
+ error_message = None
+ result = None
+
+ try:
+ result = await prompt_service.get_prompt(db, name, {})
+ success = True
+ except Exception as ex:
+ error_message = str(ex)
+ raise
+
+ # Record metrics
+ end_time = time.monotonic()
+ response_time = end_time - start_time
+
+ # Get the prompt from database to get its ID
+ prompt = db.execute(select(DbPrompt).where(DbPrompt.name == name)).scalar_one_or_none()
+
+ if prompt:
+ metric = PromptMetric(
+ prompt_id=prompt.id,
+ response_time=response_time,
+ is_success=success,
+ error_message=error_message,
+ )
+ db.add(metric)
+ db.commit()
+
+ return result
@prompt_router.put("/{name}", response_model=PromptRead)
diff --git a/mcpgateway/schemas.py b/mcpgateway/schemas.py
index 7ffb07d9..9f0a68c1 100644
--- a/mcpgateway/schemas.py
+++ b/mcpgateway/schemas.py
@@ -590,7 +590,7 @@ def prevent_manual_mcp_creation(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""
integration_type = values.get("integration_type")
if integration_type == "MCP":
- raise ValueError("Cannot manually create MCP tools. Add MCP servers via the Gateways interface - " "tools will be auto-discovered and registered with integration_type='MCP'.")
+ raise ValueError("Cannot manually create MCP tools. Add MCP servers via the Gateways interface - tools will be auto-discovered and registered with integration_type='MCP'.")
return values
@@ -2936,3 +2936,26 @@ class TagInfo(BaseModelWithConfigDict):
name: str = Field(..., description="The tag name")
stats: TagStats = Field(..., description="Statistics for this tag")
entities: Optional[List[TaggedEntity]] = Field(default_factory=list, description="Entities that have this tag")
+
+
+class TopPerformer(BaseModelWithConfigDict):
+ """Schema for representing top-performing entities with performance metrics.
+
+ Used to encapsulate metrics for entities such as prompts, resources, servers, or tools,
+ including execution count, average response time, success rate, and last execution timestamp.
+
+ Attributes:
+ id (Union[str, int]): Unique identifier for the entity.
+ name (str): Name of the entity (e.g., prompt name, resource URI, server name, or tool name).
+ execution_count (int): Total number of executions for the entity.
+ avg_response_time (Optional[float]): Average response time in seconds, or None if no metrics.
+ success_rate (Optional[float]): Success rate percentage, or None if no metrics.
+ last_execution (Optional[datetime]): Timestamp of the last execution, or None if no metrics.
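+
+    Examples:
+        Minimal illustrative construction (the optional metric fields default to None):
+
+        >>> performer = TopPerformer(id="tool-1", name="example-tool", execution_count=42)
+        >>> performer.execution_count
+        42
+        >>> performer.success_rate is None
+        True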
+ """
+
+ id: Union[str, int] = Field(..., description="Entity ID")
+ name: str = Field(..., description="Entity name")
+ execution_count: int = Field(..., description="Number of executions")
+ avg_response_time: Optional[float] = Field(None, description="Average response time in seconds")
+ success_rate: Optional[float] = Field(None, description="Success rate percentage")
+ last_execution: Optional[datetime] = Field(None, description="Timestamp of last execution")
diff --git a/mcpgateway/services/logging_service.py b/mcpgateway/services/logging_service.py
index 33b7d97a..4888ac93 100644
--- a/mcpgateway/services/logging_service.py
+++ b/mcpgateway/services/logging_service.py
@@ -123,7 +123,7 @@ async def initialize(self) -> None:
try:
root_logger.addHandler(_get_file_handler())
if settings.log_rotation_enabled:
- logging.info(f"File logging enabled with rotation: {settings.log_folder or '.'}/{settings.log_file} " f"(max: {settings.log_max_size_mb}MB, backups: {settings.log_backup_count})")
+ logging.info(f"File logging enabled with rotation: {settings.log_folder or '.'}/{settings.log_file} (max: {settings.log_max_size_mb}MB, backups: {settings.log_backup_count})")
else:
logging.info(f"File logging enabled (no rotation): {settings.log_folder or '.'}/{settings.log_file}")
except Exception as e:
diff --git a/mcpgateway/services/prompt_service.py b/mcpgateway/services/prompt_service.py
index 8094ddd6..fb826827 100644
--- a/mcpgateway/services/prompt_service.py
+++ b/mcpgateway/services/prompt_service.py
@@ -23,7 +23,7 @@
# Third-Party
from jinja2 import Environment, meta, select_autoescape
-from sqlalchemy import delete, func, not_, select
+from sqlalchemy import case, delete, desc, Float, func, not_, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
@@ -33,8 +33,9 @@
from mcpgateway.db import PromptMetric, server_prompt_association
from mcpgateway.models import Message, PromptResult, Role, TextContent
from mcpgateway.plugins import GlobalContext, PluginManager, PluginViolationError, PromptPosthookPayload, PromptPrehookPayload
-from mcpgateway.schemas import PromptCreate, PromptRead, PromptUpdate
+from mcpgateway.schemas import PromptCreate, PromptRead, PromptUpdate, TopPerformer
from mcpgateway.services.logging_service import LoggingService
+from mcpgateway.utils.metrics_common import build_top_performers
# Initialize logging service first
logging_service = LoggingService()
@@ -139,6 +140,50 @@ async def shutdown(self) -> None:
self._event_subscribers.clear()
logger.info("Prompt service shutdown complete")
+ async def get_top_prompts(self, db: Session, limit: int = 5) -> List[TopPerformer]:
+ """Retrieve the top-performing prompts based on execution count.
+
+ Queries the database to get prompts with their metrics, ordered by the number of executions
+ in descending order. Returns a list of TopPerformer objects containing prompt details and
+ performance metrics.
+
+ Args:
+ db (Session): Database session for querying prompt metrics.
+ limit (int): Maximum number of prompts to return. Defaults to 5.
+
+ Returns:
+ List[TopPerformer]: A list of TopPerformer objects, each containing:
+ - id: Prompt ID.
+ - name: Prompt name.
+ - execution_count: Total number of executions.
+ - avg_response_time: Average response time in seconds, or None if no metrics.
+ - success_rate: Success rate percentage, or None if no metrics.
+ - last_execution: Timestamp of the last execution, or None if no metrics.
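+
+        Examples:
+            Minimal illustrative check against a mocked session (assumes the
+            default PromptService() constructor):
+
+            >>> import asyncio
+            >>> from unittest.mock import MagicMock
+            >>> db = MagicMock()
+            >>> db.query.return_value.outerjoin.return_value.group_by.return_value.order_by.return_value.limit.return_value.all.return_value = []
+            >>> asyncio.run(PromptService().get_top_prompts(db))
+            []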
+ """
+ results = (
+ db.query(
+ DbPrompt.id,
+ DbPrompt.name,
+ func.count(PromptMetric.id).label("execution_count"), # pylint: disable=not-callable
+ func.avg(PromptMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable
+ case(
+ (
+ func.count(PromptMetric.id) > 0, # pylint: disable=not-callable
+ func.sum(case((PromptMetric.is_success == 1, 1), else_=0)).cast(Float) / func.count(PromptMetric.id) * 100, # pylint: disable=not-callable
+ ),
+ else_=None,
+ ).label("success_rate"),
+ func.max(PromptMetric.timestamp).label("last_execution"), # pylint: disable=not-callable
+ )
+ .outerjoin(PromptMetric)
+ .group_by(DbPrompt.id, DbPrompt.name)
+ .order_by(desc("execution_count"))
+ .limit(limit)
+ .all()
+ )
+
+ return build_top_performers(results)
+
def _convert_db_prompt(self, db_prompt: DbPrompt) -> Dict[str, Any]:
"""
Convert a DbPrompt instance to a dictionary matching the PromptRead schema,
@@ -1005,8 +1050,8 @@ async def aggregate_metrics(self, db: Session) -> Dict[str, Any]:
"""
total = db.execute(select(func.count(PromptMetric.id))).scalar() or 0 # pylint: disable=not-callable
- successful = db.execute(select(func.count(PromptMetric.id)).where(PromptMetric.is_success)).scalar() or 0 # pylint: disable=not-callable
- failed = db.execute(select(func.count(PromptMetric.id)).where(not_(PromptMetric.is_success))).scalar() or 0 # pylint: disable=not-callable
+ successful = db.execute(select(func.count(PromptMetric.id)).where(PromptMetric.is_success == 1)).scalar() or 0 # pylint: disable=not-callable
+ failed = db.execute(select(func.count(PromptMetric.id)).where(PromptMetric.is_success == 0)).scalar() or 0 # pylint: disable=not-callable
failure_rate = failed / total if total > 0 else 0.0
min_rt = db.execute(select(func.min(PromptMetric.response_time))).scalar()
max_rt = db.execute(select(func.max(PromptMetric.response_time))).scalar()
diff --git a/mcpgateway/services/resource_service.py b/mcpgateway/services/resource_service.py
index 1030b74d..24412fbe 100644
--- a/mcpgateway/services/resource_service.py
+++ b/mcpgateway/services/resource_service.py
@@ -33,7 +33,7 @@
# Third-Party
import parse
-from sqlalchemy import delete, func, not_, select
+from sqlalchemy import case, delete, desc, Float, func, not_, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
@@ -43,14 +43,9 @@
from mcpgateway.db import ResourceSubscription as DbSubscription
from mcpgateway.db import server_resource_association
from mcpgateway.models import ResourceContent, ResourceTemplate, TextContent
-from mcpgateway.schemas import (
- ResourceCreate,
- ResourceMetrics,
- ResourceRead,
- ResourceSubscription,
- ResourceUpdate,
-)
+from mcpgateway.schemas import ResourceCreate, ResourceMetrics, ResourceRead, ResourceSubscription, ResourceUpdate, TopPerformer
from mcpgateway.services.logging_service import LoggingService
+from mcpgateway.utils.metrics_common import build_top_performers
# Initialize logging service first
logging_service = LoggingService()
@@ -118,6 +113,50 @@ async def shutdown(self) -> None:
self._event_subscribers.clear()
logger.info("Resource service shutdown complete")
+ async def get_top_resources(self, db: Session, limit: int = 5) -> List[TopPerformer]:
+ """Retrieve the top-performing resources based on execution count.
+
+ Queries the database to get resources with their metrics, ordered by the number of executions
+ in descending order. Uses the resource URI as the name field for TopPerformer objects.
+ Returns a list of TopPerformer objects containing resource details and performance metrics.
+
+ Args:
+ db (Session): Database session for querying resource metrics.
+ limit (int): Maximum number of resources to return. Defaults to 5.
+
+ Returns:
+ List[TopPerformer]: A list of TopPerformer objects, each containing:
+ - id: Resource ID.
+ - name: Resource URI (used as the name field).
+ - execution_count: Total number of executions.
+ - avg_response_time: Average response time in seconds, or None if no metrics.
+ - success_rate: Success rate percentage, or None if no metrics.
+ - last_execution: Timestamp of the last execution, or None if no metrics.
+ """
+ results = (
+ db.query(
+ DbResource.id,
+ DbResource.uri.label("name"), # Using URI as the name field for TopPerformer
+ func.count(ResourceMetric.id).label("execution_count"), # pylint: disable=not-callable
+ func.avg(ResourceMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable
+ case(
+ (
+ func.count(ResourceMetric.id) > 0, # pylint: disable=not-callable
+ func.sum(case((ResourceMetric.is_success == 1, 1), else_=0)).cast(Float) / func.count(ResourceMetric.id) * 100, # pylint: disable=not-callable
+ ),
+ else_=None,
+ ).label("success_rate"),
+ func.max(ResourceMetric.timestamp).label("last_execution"), # pylint: disable=not-callable
+ )
+ .outerjoin(ResourceMetric)
+ .group_by(DbResource.id, DbResource.uri)
+ .order_by(desc("execution_count"))
+ .limit(limit)
+ .all()
+ )
+
+ return build_top_performers(results)
+
def _convert_resource_to_read(self, resource: DbResource) -> ResourceRead:
"""
Converts a DbResource instance into a ResourceRead model, including aggregated metrics.
@@ -1001,9 +1040,9 @@ async def aggregate_metrics(self, db: Session) -> ResourceMetrics:
"""
total_executions = db.execute(select(func.count()).select_from(ResourceMetric)).scalar() or 0 # pylint: disable=not-callable
- successful_executions = db.execute(select(func.count()).select_from(ResourceMetric).where(ResourceMetric.is_success)).scalar() or 0 # pylint: disable=not-callable
+ successful_executions = db.execute(select(func.count()).select_from(ResourceMetric).where(ResourceMetric.is_success == 1)).scalar() or 0 # pylint: disable=not-callable
- failed_executions = db.execute(select(func.count()).select_from(ResourceMetric).where(not_(ResourceMetric.is_success))).scalar() or 0 # pylint: disable=not-callable
+ failed_executions = db.execute(select(func.count()).select_from(ResourceMetric).where(ResourceMetric.is_success == 0)).scalar() or 0 # pylint: disable=not-callable
min_response_time = db.execute(select(func.min(ResourceMetric.response_time))).scalar()
diff --git a/mcpgateway/services/server_service.py b/mcpgateway/services/server_service.py
index 96b7c37a..46a1db0d 100644
--- a/mcpgateway/services/server_service.py
+++ b/mcpgateway/services/server_service.py
@@ -19,7 +19,7 @@
# Third-Party
import httpx
-from sqlalchemy import delete, func, not_, select
+from sqlalchemy import case, delete, desc, Float, func, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
@@ -30,8 +30,9 @@
from mcpgateway.db import Server as DbServer
from mcpgateway.db import ServerMetric
from mcpgateway.db import Tool as DbTool
-from mcpgateway.schemas import ServerCreate, ServerMetrics, ServerRead, ServerUpdate
+from mcpgateway.schemas import ServerCreate, ServerMetrics, ServerRead, ServerUpdate, TopPerformer
from mcpgateway.services.logging_service import LoggingService
+from mcpgateway.utils.metrics_common import build_top_performers
# Initialize logging service first
logging_service = LoggingService()
@@ -126,6 +127,51 @@ async def shutdown(self) -> None:
await self._http_client.aclose()
logger.info("Server service shutdown complete")
+    # get_top_servers
+ async def get_top_servers(self, db: Session, limit: int = 5) -> List[TopPerformer]:
+ """Retrieve the top-performing servers based on execution count.
+
+ Queries the database to get servers with their metrics, ordered by the number of executions
+ in descending order. Returns a list of TopPerformer objects containing server details and
+ performance metrics.
+
+ Args:
+ db (Session): Database session for querying server metrics.
+ limit (int): Maximum number of servers to return. Defaults to 5.
+
+ Returns:
+ List[TopPerformer]: A list of TopPerformer objects, each containing:
+ - id: Server ID.
+ - name: Server name.
+ - execution_count: Total number of executions.
+ - avg_response_time: Average response time in seconds, or None if no metrics.
+ - success_rate: Success rate percentage, or None if no metrics.
+ - last_execution: Timestamp of the last execution, or None if no metrics.
+ """
+ results = (
+ db.query(
+ DbServer.id,
+ DbServer.name,
+ func.count(ServerMetric.id).label("execution_count"), # pylint: disable=not-callable
+ func.avg(ServerMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable
+ case(
+ (
+ func.count(ServerMetric.id) > 0, # pylint: disable=not-callable
+ func.sum(case((ServerMetric.is_success == 1, 1), else_=0)).cast(Float) / func.count(ServerMetric.id) * 100, # pylint: disable=not-callable
+ ),
+ else_=None,
+ ).label("success_rate"),
+ func.max(ServerMetric.timestamp).label("last_execution"), # pylint: disable=not-callable
+ )
+ .outerjoin(ServerMetric)
+ .group_by(DbServer.id, DbServer.name)
+ .order_by(desc("execution_count"))
+ .limit(limit)
+ .all()
+ )
+
+ return build_top_performers(results)
+
def _convert_server_to_read(self, server: DbServer) -> ServerRead:
"""
Converts a DbServer instance into a ServerRead model, including aggregated metrics.
@@ -787,9 +833,9 @@ async def aggregate_metrics(self, db: Session) -> ServerMetrics:
"""
total_executions = db.execute(select(func.count()).select_from(ServerMetric)).scalar() or 0 # pylint: disable=not-callable
- successful_executions = db.execute(select(func.count()).select_from(ServerMetric).where(ServerMetric.is_success)).scalar() or 0 # pylint: disable=not-callable
+ successful_executions = db.execute(select(func.count()).select_from(ServerMetric).where(ServerMetric.is_success == 1)).scalar() or 0 # pylint: disable=not-callable
- failed_executions = db.execute(select(func.count()).select_from(ServerMetric).where(not_(ServerMetric.is_success))).scalar() or 0 # pylint: disable=not-callable
+ failed_executions = db.execute(select(func.count()).select_from(ServerMetric).where(ServerMetric.is_success == 0)).scalar() or 0 # pylint: disable=not-callable
min_response_time = db.execute(select(func.min(ServerMetric.response_time))).scalar()
diff --git a/mcpgateway/services/tool_service.py b/mcpgateway/services/tool_service.py
index 55c94727..404aa6ba 100644
--- a/mcpgateway/services/tool_service.py
+++ b/mcpgateway/services/tool_service.py
@@ -28,7 +28,7 @@
from mcp import ClientSession
from mcp.client.sse import sse_client
from mcp.client.streamable_http import streamablehttp_client
-from sqlalchemy import delete, func, not_, select
+from sqlalchemy import case, delete, desc, Float, func, not_, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
@@ -41,13 +41,10 @@
from mcpgateway.models import TextContent, ToolResult
from mcpgateway.plugins.framework.manager import PluginManager
from mcpgateway.plugins.framework.plugin_types import GlobalContext, PluginViolationError, ToolPostInvokePayload, ToolPreInvokePayload
-from mcpgateway.schemas import (
- ToolCreate,
- ToolRead,
- ToolUpdate,
-)
+from mcpgateway.schemas import ToolCreate, ToolRead, ToolUpdate, TopPerformer
from mcpgateway.services.logging_service import LoggingService
from mcpgateway.utils.create_slug import slugify
+from mcpgateway.utils.metrics_common import build_top_performers
from mcpgateway.utils.passthrough_headers import get_passthrough_headers
from mcpgateway.utils.retry_manager import ResilientHttpClient
from mcpgateway.utils.services_auth import decode_auth
@@ -193,6 +190,50 @@ async def shutdown(self) -> None:
await self._http_client.aclose()
logger.info("Tool service shutdown complete")
+ async def get_top_tools(self, db: Session, limit: int = 5) -> List[TopPerformer]:
+ """Retrieve the top-performing tools based on execution count.
+
+ Queries the database to get tools with their metrics, ordered by the number of executions
+ in descending order. Returns a list of TopPerformer objects containing tool details and
+ performance metrics.
+
+ Args:
+ db (Session): Database session for querying tool metrics.
+ limit (int): Maximum number of tools to return. Defaults to 5.
+
+ Returns:
+ List[TopPerformer]: A list of TopPerformer objects, each containing:
+ - id: Tool ID.
+ - name: Tool name.
+ - execution_count: Total number of executions.
+ - avg_response_time: Average response time in seconds, or None if no metrics.
+ - success_rate: Success rate percentage, or None if no metrics.
+ - last_execution: Timestamp of the last execution, or None if no metrics.
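+
+        Examples:
+            Minimal illustrative check against a mocked session (assumes the
+            default ToolService() constructor):
+
+            >>> import asyncio
+            >>> from unittest.mock import MagicMock
+            >>> db = MagicMock()
+            >>> db.query.return_value.outerjoin.return_value.group_by.return_value.order_by.return_value.limit.return_value.all.return_value = []
+            >>> asyncio.run(ToolService().get_top_tools(db))
+            []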
+ """
+ results = (
+ db.query(
+ DbTool.id,
+ DbTool.name,
+ func.count(ToolMetric.id).label("execution_count"), # pylint: disable=not-callable
+ func.avg(ToolMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable
+ case(
+ (
+ func.count(ToolMetric.id) > 0, # pylint: disable=not-callable
+ func.sum(case((ToolMetric.is_success == 1, 1), else_=0)).cast(Float) / func.count(ToolMetric.id) * 100, # pylint: disable=not-callable
+ ),
+ else_=None,
+ ).label("success_rate"),
+ func.max(ToolMetric.timestamp).label("last_execution"), # pylint: disable=not-callable
+ )
+ .outerjoin(ToolMetric)
+ .group_by(DbTool.id, DbTool.name)
+ .order_by(desc("execution_count"))
+ .limit(limit)
+ .all()
+ )
+
+ return build_top_performers(results)
+
def _convert_tool_to_read(self, tool: DbTool) -> ToolRead:
"""Converts a DbTool instance into a ToolRead model, including aggregated metrics and
new API gateway fields: request_type and authentication credentials (masked).
@@ -334,10 +375,12 @@ async def register_tool(self, db: Session, tool: ToolCreate) -> ToolRead:
logger.info(f"Registered tool: {db_tool.name}")
return self._convert_tool_to_read(db_tool)
except IntegrityError as ie:
+ db.rollback()
logger.error(f"IntegrityError during tool registration: {ie}")
- raise ie
- except Exception as ex:
- raise ToolError(f"Failed to register tool: {str(ex)}")
+    raise ToolError(f"Tool already exists: {tool.name}") from ie
+ except Exception as e:
+ db.rollback()
+    raise ToolError(f"Failed to register tool: {str(e)}") from e
async def list_tools(
self, db: Session, include_inactive: bool = False, cursor: Optional[str] = None, tags: Optional[List[str]] = None, _request_headers: Optional[Dict[str, str]] = None
@@ -686,18 +729,21 @@ async def invoke_tool(self, db: Session, name: str, arguments: Dict[str, Any], r
# Handle 204 No Content responses that have no body
if response.status_code == 204:
tool_result = ToolResult(content=[TextContent(type="text", text="Request completed successfully (No Content)")])
+ # Mark as successful only after all operations complete successfully
+ success = True
elif response.status_code not in [200, 201, 202, 206]:
result = response.json()
tool_result = ToolResult(
content=[TextContent(type="text", text=str(result["error"]) if "error" in result else "Tool error encountered")],
is_error=True,
)
+ # Don't mark as successful for error responses - success remains False
else:
result = response.json()
filtered_response = extract_using_jq(result, tool.jsonpath_filter)
tool_result = ToolResult(content=[TextContent(type="text", text=json.dumps(filtered_response, indent=2))])
-
- success = True
+ # Mark as successful only after all operations complete successfully
+ success = True
elif tool.integration_type == "MCP":
transport = tool.request_type.lower()
gateway = db.execute(select(DbGateway).where(DbGateway.id == tool.gateway_id).where(DbGateway.enabled)).scalar_one_or_none()
@@ -747,9 +793,10 @@ async def connect_to_streamablehttp_server(server_url: str):
tool_call_result = await connect_to_streamablehttp_server(tool_gateway.url)
content = tool_call_result.model_dump(by_alias=True).get("content", [])
- success = True
filtered_response = extract_using_jq(content, tool.jsonpath_filter)
tool_result = ToolResult(content=filtered_response)
+ # Mark as successful only after all operations complete successfully
+ success = True
else:
tool_result = ToolResult(content=[TextContent(type="text", text="Invalid tool type")])
@@ -1064,8 +1111,8 @@ async def aggregate_metrics(self, db: Session) -> Dict[str, Any]:
"""
total = db.execute(select(func.count(ToolMetric.id))).scalar() or 0 # pylint: disable=not-callable
- successful = db.execute(select(func.count(ToolMetric.id)).where(ToolMetric.is_success)).scalar() or 0 # pylint: disable=not-callable
- failed = db.execute(select(func.count(ToolMetric.id)).where(not_(ToolMetric.is_success))).scalar() or 0 # pylint: disable=not-callable
+ successful = db.execute(select(func.count(ToolMetric.id)).where(ToolMetric.is_success == 1)).scalar() or 0 # pylint: disable=not-callable
+ failed = db.execute(select(func.count(ToolMetric.id)).where(ToolMetric.is_success == 0)).scalar() or 0 # pylint: disable=not-callable
failure_rate = failed / total if total > 0 else 0.0
min_rt = db.execute(select(func.min(ToolMetric.response_time))).scalar()
max_rt = db.execute(select(func.max(ToolMetric.response_time))).scalar()
diff --git a/mcpgateway/static/admin.js b/mcpgateway/static/admin.js
index 18f4d24b..73bb501e 100644
--- a/mcpgateway/static/admin.js
+++ b/mcpgateway/static/admin.js
@@ -890,7 +890,9 @@ function displayMetrics(data) {
// Top Performers section (before individual metrics)
if (data.topPerformers || data.top) {
const topData = data.topPerformers || data.top;
- const topSection = createTopPerformersSection(topData);
+ // const topSection = createTopPerformersSection(topData);
+ const topSection = createEnhancedTopPerformersSection(topData);
+
mainContainer.appendChild(topSection);
}
@@ -1198,7 +1200,54 @@ function extractKPIData(data) {
/**
* SECURITY: Create top performers section with safe display
*/
-function createTopPerformersSection(topData) {
+// function createTopPerformersSection(topData) {
+// try {
+// const section = document.createElement("div");
+// section.className = "bg-white rounded-lg shadow p-6 dark:bg-gray-800";
+
+// const title = document.createElement("h3");
+// title.className = "text-lg font-medium mb-4 dark:text-gray-200";
+// title.textContent = "Top Performers";
+// section.appendChild(title);
+
+// const grid = document.createElement("div");
+// grid.className = "grid grid-cols-1 md:grid-cols-2 gap-4";
+
+// // Top Tools
+// if (topData.tools && Array.isArray(topData.tools)) {
+// const toolsCard = createTopItemCard("Tools", topData.tools);
+// grid.appendChild(toolsCard);
+// }
+
+// // Top Resources
+// if (topData.resources && Array.isArray(topData.resources)) {
+// const resourcesCard = createTopItemCard(
+// "Resources",
+// topData.resources,
+// );
+// grid.appendChild(resourcesCard);
+// }
+
+// // Top Prompts
+// if (topData.prompts && Array.isArray(topData.prompts)) {
+// const promptsCard = createTopItemCard("Prompts", topData.prompts);
+// grid.appendChild(promptsCard);
+// }
+
+// // Top Servers
+// if (topData.servers && Array.isArray(topData.servers)) {
+// const serversCard = createTopItemCard("Servers", topData.servers);
+// grid.appendChild(serversCard);
+// }
+
+// section.appendChild(grid);
+// return section;
+// } catch (error) {
+// console.error("Error creating top performers section:", error);
+// return document.createElement("div"); // Safe fallback
+// }
+// }
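+/**
+ * Build the enhanced "Top Performers" section: a tabbed, paginated table per
+ * entity type present in the data (tools, resources, prompts, gateways, servers),
+ * plus a CSV export button. Falls back to an empty element and a user-facing
+ * error message if rendering fails.
+ */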
+function createEnhancedTopPerformersSection(topData) {
try {
const section = document.createElement("div");
section.className = "bg-white rounded-lg shadow p-6 dark:bg-gray-800";
@@ -1206,87 +1255,453 @@ function createTopPerformersSection(topData) {
const title = document.createElement("h3");
title.className = "text-lg font-medium mb-4 dark:text-gray-200";
title.textContent = "Top Performers";
+ title.setAttribute("aria-label", "Top Performers Section");
section.appendChild(title);
- const grid = document.createElement("div");
- grid.className = "grid grid-cols-1 md:grid-cols-2 gap-4";
+ // Loading skeleton
+ const skeleton = document.createElement("div");
+ skeleton.className = "animate-pulse space-y-4";
+ skeleton.innerHTML = `
+
+ `;
+ section.appendChild(skeleton);
+
+ // Tabs
+ const tabsContainer = document.createElement("div");
+ tabsContainer.className =
+ "border-b border-gray-200 dark:border-gray-700";
+ const tabList = document.createElement("nav");
+ tabList.className = "-mb-px flex space-x-8 overflow-x-auto";
+ tabList.setAttribute("aria-label", "Top Performers Tabs");
+
+ const entityTypes = [
+ "tools",
+ "resources",
+ "prompts",
+ "gateways",
+ "servers",
+ ];
+ entityTypes.forEach((type, index) => {
+ if (topData[type] && Array.isArray(topData[type])) {
+ const tab = createTab(type, index === 0);
+ tabList.appendChild(tab);
+ }
+ });
- // Top Tools
- if (topData.tools && Array.isArray(topData.tools)) {
- const toolsCard = createTopItemCard("Tools", topData.tools);
- grid.appendChild(toolsCard);
- }
+ tabsContainer.appendChild(tabList);
+ section.appendChild(tabsContainer);
- // Top Resources
- if (topData.resources && Array.isArray(topData.resources)) {
- const resourcesCard = createTopItemCard(
- "Resources",
- topData.resources,
- );
- grid.appendChild(resourcesCard);
- }
+ // Content panels
+ const contentContainer = document.createElement("div");
+ contentContainer.className = "mt-4";
- // Top Prompts
- if (topData.prompts && Array.isArray(topData.prompts)) {
- const promptsCard = createTopItemCard("Prompts", topData.prompts);
- grid.appendChild(promptsCard);
- }
+ entityTypes.forEach((type, index) => {
+ if (topData[type] && Array.isArray(topData[type])) {
+ const panel = createTopPerformersTable(
+ type,
+ topData[type],
+ index === 0,
+ );
+ contentContainer.appendChild(panel);
+ }
+ });
- // Top Servers
- if (topData.servers && Array.isArray(topData.servers)) {
- const serversCard = createTopItemCard("Servers", topData.servers);
- grid.appendChild(serversCard);
- }
+ section.appendChild(contentContainer);
+
+ // Remove skeleton once data is loaded
+ setTimeout(() => skeleton.remove(), 500); // Simulate async data load
+
+ // Export button
+ const exportButton = document.createElement("button");
+ exportButton.className =
+ "mt-4 bg-indigo-600 text-white px-4 py-2 rounded hover:bg-indigo-700 dark:bg-indigo-500 dark:hover:bg-indigo-600";
+ exportButton.textContent = "Export Metrics";
+ exportButton.onclick = () => exportMetricsToCSV(topData);
+ section.appendChild(exportButton);
- section.appendChild(grid);
return section;
} catch (error) {
- console.error("Error creating top performers section:", error);
- return document.createElement("div"); // Safe fallback
+ console.error("Error creating enhanced top performers section:", error);
+ showErrorMessage("Failed to load top performers section");
+ return document.createElement("div");
+ }
+}
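+/**
+ * Derive a whole-number success percentage for a top-performer item.
+ * Prefers the successRate field returned by the API; otherwise falls back to
+ * successful/total execution counts (legacy shape), e.g. { successRate: 96.4 } -> 96.
+ */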
+function calculateSuccessRate(item) {
+ // API returns successRate directly as a percentage
+ if (item.successRate !== undefined && item.successRate !== null) {
+ return Math.round(item.successRate);
}
+ // Fallback for legacy format (if needed)
+ const total =
+ item.execution_count || item.executions || item.executionCount || 0;
+ const successful = item.successful_count || item.successfulExecutions || 0;
+ return total > 0 ? Math.round((successful / total) * 100) : 0;
}
-/**
- * SECURITY: Create top item card with safe content handling
- */
-function createTopItemCard(title, items) {
- try {
- const card = document.createElement("div");
- card.className = "bg-gray-50 rounded p-4 dark:bg-gray-700";
+function formatNumber(num) {
+ return new Intl.NumberFormat().format(num);
+}
+
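+/**
+ * Render a timestamp as a relative "last used" label ("Just now", "N min ago",
+ * "N hours ago", "N days ago"), falling back to a locale date string beyond a
+ * week and "Never" when no timestamp is provided.
+ */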
+function formatLastUsed(timestamp) {
+ if (!timestamp) {
+ return "Never";
+ }
+
+ const date = new Date(timestamp);
+ const now = new Date();
+ const diffMs = now - date;
+ const diffMins = Math.floor(diffMs / 60000);
+
+ if (diffMins < 1) {
+ return "Just now";
+ }
+ if (diffMins < 60) {
+ return `${diffMins} min ago`;
+ }
+ if (diffMins < 1440) {
+ return `${Math.floor(diffMins / 60)} hours ago`;
+ }
+ if (diffMins < 10080) {
+ return `${Math.floor(diffMins / 1440)} days ago`;
+ }
- const cardTitle = document.createElement("h4");
- cardTitle.className = "font-medium mb-2 dark:text-gray-200";
- cardTitle.textContent = `Top ${title}`;
- card.appendChild(cardTitle);
+ return date.toLocaleDateString();
+}
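+/**
+ * Build the tab panel table for one entity type: rank badge, name, executions,
+ * average response time, success-rate badge, and last-used columns for the top
+ * five items, plus pagination controls when more rows are available.
+ */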
+function createTopPerformersTable(entityType, data, isActive) {
+ const panel = document.createElement("div");
+ panel.id = `top-${entityType}-panel`;
+ panel.className = `transition-opacity duration-300 ${isActive ? "opacity-100" : "hidden opacity-0"}`;
+ panel.setAttribute("role", "tabpanel");
+ panel.setAttribute("aria-labelledby", `top-${entityType}-tab`);
+
+ if (data.length === 0) {
+ const emptyState = document.createElement("p");
+ emptyState.className =
+ "text-gray-500 dark:text-gray-400 text-center py-4";
+ emptyState.textContent = `No ${entityType} data available`;
+ panel.appendChild(emptyState);
+ return panel;
+ }
- const list = document.createElement("ul");
- list.className = "space-y-1";
+ // Responsive table wrapper
+ const tableWrapper = document.createElement("div");
+ tableWrapper.className = "overflow-x-auto sm:overflow-x-visible";
+
+ const table = document.createElement("table");
+ table.className =
+ "min-w-full divide-y divide-gray-200 dark:divide-gray-700";
+
+ // Table header
+ const thead = document.createElement("thead");
+ thead.className =
+ "bg-gray-50 dark:bg-gray-700 hidden sm:table-header-group";
+ const headerRow = document.createElement("tr");
+ const headers = [
+ "Rank",
+ "Name",
+ "Executions",
+ "Avg Response Time",
+ "Success Rate",
+ "Last Used",
+ ];
- items.slice(0, 5).forEach((item) => {
- const listItem = document.createElement("li");
- listItem.className =
- "text-sm text-gray-600 dark:text-gray-300 flex justify-between";
+ headers.forEach((headerText, index) => {
+ const th = document.createElement("th");
+ th.className =
+ "px-6 py-3 text-left text-xs font-medium text-gray-500 dark:text-gray-300 uppercase tracking-wider";
+ th.setAttribute("scope", "col");
+ th.textContent = headerText;
+ if (index === 0) {
+ th.setAttribute("aria-sort", "ascending");
+ }
+ headerRow.appendChild(th);
+ });
+
+ thead.appendChild(headerRow);
+ table.appendChild(thead);
+
+ // Table body
+ const tbody = document.createElement("tbody");
+ tbody.className =
+ "bg-white dark:bg-gray-800 divide-y divide-gray-200 dark:divide-gray-700";
+
+ // Pagination (if > 5 items)
+ const paginatedData = data.slice(0, 5); // Limit to top 5
+ paginatedData.forEach((item, index) => {
+ const row = document.createElement("tr");
+ row.className =
+ "hover:bg-gray-50 dark:hover:bg-gray-700 transition-colors duration-200";
+
+ // Rank
+ const rankCell = document.createElement("td");
+ rankCell.className =
+ "px-6 py-4 whitespace-nowrap text-sm font-medium text-gray-900 dark:text-gray-100 sm:px-6 sm:py-4";
+ const rankBadge = document.createElement("span");
+ rankBadge.className = `inline-flex items-center justify-center w-6 h-6 rounded-full ${
+ index === 0
+ ? "bg-yellow-400 text-yellow-900"
+ : index === 1
+ ? "bg-gray-300 text-gray-900"
+ : index === 2
+ ? "bg-orange-400 text-orange-900"
+ : "bg-gray-100 text-gray-600"
+ }`;
+ rankBadge.textContent = index + 1;
+ rankBadge.setAttribute("aria-label", `Rank ${index + 1}`);
+ rankCell.appendChild(rankBadge);
+ row.appendChild(rankCell);
+
+ // Name (clickable for drill-down)
+ const nameCell = document.createElement("td");
+ nameCell.className =
+ "px-6 py-4 whitespace-nowrap text-sm text-indigo-600 dark:text-indigo-400 cursor-pointer";
+ nameCell.textContent = escapeHtml(item.name || "Unknown");
+ // nameCell.onclick = () => showDetailedMetrics(entityType, item.id);
+ nameCell.setAttribute("role", "button");
+ nameCell.setAttribute(
+ "aria-label",
+ `View details for ${item.name || "Unknown"}`,
+ );
+ row.appendChild(nameCell);
+
+ // Executions
+ const execCell = document.createElement("td");
+ execCell.className =
+ "px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4";
+ execCell.textContent = formatNumber(
+ item.executionCount || item.execution_count || item.executions || 0,
+ );
+ row.appendChild(execCell);
+
+ // Avg Response Time
+ const avgTimeCell = document.createElement("td");
+ avgTimeCell.className =
+ "px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4";
+ const avgTime = item.avg_response_time || item.avgResponseTime;
+ avgTimeCell.textContent = avgTime ? `${Math.round(avgTime)}ms` : "N/A";
+ row.appendChild(avgTimeCell);
+
+ // Success Rate
+ const successCell = document.createElement("td");
+ successCell.className =
+ "px-6 py-4 whitespace-nowrap text-sm sm:px-6 sm:py-4";
+ const successRate = calculateSuccessRate(item);
+ const successBadge = document.createElement("span");
+ successBadge.className = `inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium ${
+ successRate >= 95
+ ? "bg-green-100 text-green-800 dark:bg-green-800 dark:text-green-100"
+ : successRate >= 80
+ ? "bg-yellow-100 text-yellow-800 dark:bg-yellow-800 dark:text-yellow-100"
+ : "bg-red-100 text-red-800 dark:bg-red-800 dark:text-red-100"
+ }`;
+ successBadge.textContent = `${successRate}%`;
+ successBadge.setAttribute(
+ "aria-label",
+ `Success rate: ${successRate}%`,
+ );
+ successCell.appendChild(successBadge);
+ row.appendChild(successCell);
+
+ // Last Used
+ const lastUsedCell = document.createElement("td");
+ lastUsedCell.className =
+ "px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4";
+ lastUsedCell.textContent = formatLastUsed(
+ item.last_execution || item.lastExecution,
+ );
+ row.appendChild(lastUsedCell);
- const nameSpan = document.createElement("span");
- nameSpan.textContent = item.name || "Unknown";
+ tbody.appendChild(row);
+ });
- const countSpan = document.createElement("span");
- countSpan.className = "font-medium";
- countSpan.textContent = String(item.executions || 0);
+ table.appendChild(tbody);
+ tableWrapper.appendChild(table);
+ panel.appendChild(tableWrapper);
- listItem.appendChild(nameSpan);
- listItem.appendChild(countSpan);
- list.appendChild(listItem);
+ // Pagination controls (if needed)
+ if (data.length > 5) {
+ const pagination = createPaginationControls(data.length, 5, (page) => {
+ updateTableRows(panel, entityType, data, page);
});
+ panel.appendChild(pagination);
+ }
- card.appendChild(list);
- return card;
- } catch (error) {
- console.error("Error creating top item card:", error);
- return document.createElement("div"); // Safe fallback
+ return panel;
+}
+
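+/**
+ * Create a single tab link for the Top Performers section, wired to
+ * showTopPerformerTab() and styled according to whether it starts active.
+ */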
+function createTab(type, isActive) {
+ const tab = document.createElement("a");
+ tab.href = "#";
+ tab.id = `top-${type}-tab`;
+ tab.className = `${
+ isActive
+ ? "border-indigo-500 text-indigo-600 dark:text-indigo-400"
+ : "border-transparent text-gray-500 hover:text-gray-700 hover:border-gray-300 dark:text-gray-400 dark:hover:text-gray-300"
+ } whitespace-nowrap py-4 px-1 border-b-2 font-medium text-sm capitalize transition-colors duration-200 sm:py-4 sm:px-1`;
+ tab.textContent = type;
+ tab.setAttribute("role", "tab");
+ tab.setAttribute("aria-controls", `top-${type}-panel`);
+ tab.setAttribute("aria-selected", isActive.toString());
+ tab.onclick = (e) => {
+ e.preventDefault();
+ showTopPerformerTab(type);
+ };
+ return tab;
+}
+
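+/**
+ * Toggle visibility and active styling of the Top Performers panels and tabs
+ * so that only the selected entity type is shown.
+ */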
+function showTopPerformerTab(activeType) {
+ const entityTypes = [
+ "tools",
+ "resources",
+ "prompts",
+ "gateways",
+ "servers",
+ ];
+ entityTypes.forEach((type) => {
+ const panel = document.getElementById(`top-${type}-panel`);
+ const tab = document.getElementById(`top-${type}-tab`);
+ if (panel) {
+ panel.classList.toggle("hidden", type !== activeType);
+ panel.classList.toggle("opacity-100", type === activeType);
+ panel.classList.toggle("opacity-0", type !== activeType);
+ panel.setAttribute("aria-hidden", type !== activeType);
+ }
+ if (tab) {
+ tab.classList.toggle("border-indigo-500", type === activeType);
+ tab.classList.toggle("text-indigo-600", type === activeType);
+ tab.classList.toggle("dark:text-indigo-400", type === activeType);
+ tab.classList.toggle("border-transparent", type !== activeType);
+ tab.classList.toggle("text-gray-500", type !== activeType);
+ tab.setAttribute("aria-selected", type === activeType);
+ }
+ });
+}
+
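+/**
+ * Build numbered pagination buttons; clicking a page invokes onPageChange and
+ * restyles the buttons so the active page is highlighted.
+ */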
+function createPaginationControls(totalItems, itemsPerPage, onPageChange) {
+ const pagination = document.createElement("div");
+ pagination.className = "mt-4 flex justify-end space-x-2";
+ const totalPages = Math.ceil(totalItems / itemsPerPage);
+
+ for (let page = 1; page <= totalPages; page++) {
+ const button = document.createElement("button");
+ button.className = `px-3 py-1 rounded ${page === 1 ? "bg-indigo-600 text-white" : "bg-gray-200 text-gray-700 dark:bg-gray-700 dark:text-gray-300"}`;
+ button.textContent = page;
+ button.onclick = () => {
+ onPageChange(page);
+ pagination.querySelectorAll("button").forEach((btn) => {
+ btn.className = `px-3 py-1 rounded ${btn === button ? "bg-indigo-600 text-white" : "bg-gray-200 text-gray-700 dark:bg-gray-700 dark:text-gray-300"}`;
+ });
+ };
+ pagination.appendChild(button);
}
+
+ return pagination;
+}
+
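+/**
+ * Re-render the table body for the selected page. Currently a placeholder:
+ * the per-row cells still need to reuse the row-building logic from
+ * createTopPerformersTable().
+ */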
+function updateTableRows(panel, entityType, data, page) {
+ const tbody = panel.querySelector("tbody");
+ tbody.innerHTML = "";
+ const start = (page - 1) * 5;
+ const paginatedData = data.slice(start, start + 5);
+
+ paginatedData.forEach((item, index) => {
+ const row = document.createElement("tr");
+ // ... (same row creation logic as in createTopPerformersTable)
+ tbody.appendChild(row);
+ });
+}
+
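+/**
+ * Export the Top Performers data for every entity type to CSV and trigger a
+ * browser download named top_performers_<timestamp>.csv.
+ */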
+function exportMetricsToCSV(topData) {
+ const headers = [
+ "Entity Type",
+ "Rank",
+ "Name",
+ "Executions",
+ "Avg Response Time",
+ "Success Rate",
+ "Last Used",
+ ];
+ const rows = [];
+
+ ["tools", "resources", "prompts", "gateways", "servers"].forEach((type) => {
+ if (topData[type] && Array.isArray(topData[type])) {
+ topData[type].forEach((item, index) => {
+ rows.push([
+ type,
+ index + 1,
+ `"${escapeHtml(item.name || "Unknown")}"`,
+ formatNumber(
+ item.executionCount ||
+ item.execution_count ||
+ item.executions ||
+ 0,
+ ),
+ item.avg_response_time || item.avgResponseTime
+ ? `${Math.round(item.avg_response_time || item.avgResponseTime)}ms`
+ : "N/A",
+ `${calculateSuccessRate(item)}%`,
+ formatLastUsed(item.last_execution || item.lastExecution),
+ ]);
+ });
+ }
+ });
+
+ const csv = [headers.join(","), ...rows.map((row) => row.join(","))].join(
+ "\n",
+ );
+ const blob = new Blob([csv], { type: "text/csv" });
+ const url = URL.createObjectURL(blob);
+ const a = document.createElement("a");
+ a.href = url;
+ a.download = `top_performers_${new Date().toISOString()}.csv`;
+ a.click();
+ URL.revokeObjectURL(url);
}
+/**
+ * SECURITY: Create top item card with safe content handling
+ */
+// function createTopItemCard(title, items) {
+// try {
+// const card = document.createElement("div");
+// card.className = "bg-gray-50 rounded p-4 dark:bg-gray-700";
+
+// const cardTitle = document.createElement("h4");
+// cardTitle.className = "font-medium mb-2 dark:text-gray-200";
+// cardTitle.textContent = `Top ${title}`;
+// card.appendChild(cardTitle);
+
+// const list = document.createElement("ul");
+// list.className = "space-y-1";
+
+// items.slice(0, 5).forEach((item) => {
+// const listItem = document.createElement("li");
+// listItem.className =
+// "text-sm text-gray-600 dark:text-gray-300 flex justify-between";
+
+// const nameSpan = document.createElement("span");
+// nameSpan.textContent = item.name || "Unknown";
+
+// const countSpan = document.createElement("span");
+// countSpan.className = "font-medium";
+// countSpan.textContent = String(item.executions || 0);
+
+// listItem.appendChild(nameSpan);
+// listItem.appendChild(countSpan);
+// list.appendChild(listItem);
+// });
+
+// card.appendChild(list);
+// return card;
+// } catch (error) {
+// console.error("Error creating top item card:", error);
+// return document.createElement("div"); // Safe fallback
+// }
+// }
+
/**
* SECURITY: Create performance metrics card with safe display
*/
diff --git a/mcpgateway/utils/metrics_common.py b/mcpgateway/utils/metrics_common.py
new file mode 100644
index 00000000..b10cfff6
--- /dev/null
+++ b/mcpgateway/utils/metrics_common.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+"""
+Common utilities for metrics handling across service modules.
+
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+Authors: Mihai Criveti
+"""
+
+# Standard
+from typing import List
+
+# First-Party
+from mcpgateway.schemas import TopPerformer
+
+
+def build_top_performers(results: List) -> List[TopPerformer]:
+ """
+ Convert database query results to TopPerformer objects.
+
+ This utility function eliminates code duplication across service modules
+ that need to convert database query results with metrics into TopPerformer objects.
+
+ Args:
+ results: List of database query results, each containing:
+ - id: Entity ID
+ - name: Entity name
+ - execution_count: Total executions
+ - avg_response_time: Average response time
+ - success_rate: Success rate percentage
+ - last_execution: Last execution timestamp
+
+ Returns:
+ List[TopPerformer]: List of TopPerformer objects with proper type conversions
+
+ Examples:
+ >>> from unittest.mock import MagicMock
+ >>> result = MagicMock()
+ >>> result.id = 1
+ >>> result.name = "test"
+ >>> result.execution_count = 10
+ >>> result.avg_response_time = 1.5
+ >>> result.success_rate = 85.0
+ >>> result.last_execution = None
+ >>> performers = build_top_performers([result])
+ >>> len(performers)
+ 1
+ >>> performers[0].id
+ 1
+ >>> performers[0].execution_count
+ 10
+ >>> performers[0].avg_response_time
+ 1.5
+ """
+ return [
+ TopPerformer(
+ id=result.id,
+ name=result.name,
+ execution_count=result.execution_count or 0,
+            avg_response_time=float(result.avg_response_time) if result.avg_response_time is not None else None,
+            success_rate=float(result.success_rate) if result.success_rate is not None else None,
+ last_execution=result.last_execution,
+ )
+ for result in results
+ ]
diff --git a/tests/e2e/test_main_apis.py b/tests/e2e/test_main_apis.py
index 3ff0e72b..21e766e4 100644
--- a/tests/e2e/test_main_apis.py
+++ b/tests/e2e/test_main_apis.py
@@ -742,8 +742,8 @@ async def test_tool_name_conflict(self, client: AsyncClient, mock_auth):
# Try to create duplicate - might succeed with different ID
response = await client.post("/tools", json=tool_data, headers=TEST_AUTH_HEADER)
- # Accept 409 Conflict as valid for duplicate
- assert response.status_code in [200, 409]
+ # Accept 400, 409, or 200 as valid responses for duplicate
+ assert response.status_code in [200, 400, 409]
if response.status_code == 400:
assert "already exists" in response.json()["detail"]
diff --git a/tests/unit/mcpgateway/cache/test_session_registry_extended.py b/tests/unit/mcpgateway/cache/test_session_registry_extended.py
index 6a3195ad..a528e42e 100644
--- a/tests/unit/mcpgateway/cache/test_session_registry_extended.py
+++ b/tests/unit/mcpgateway/cache/test_session_registry_extended.py
@@ -31,7 +31,7 @@ def test_redis_import_error_flag(self):
import importlib
import mcpgateway.cache.session_registry
importlib.reload(mcpgateway.cache.session_registry)
-
+
# Should set REDIS_AVAILABLE = False
assert not mcpgateway.cache.session_registry.REDIS_AVAILABLE
@@ -41,7 +41,7 @@ def test_sqlalchemy_import_error_flag(self):
import importlib
import mcpgateway.cache.session_registry
importlib.reload(mcpgateway.cache.session_registry)
-
+
# Should set SQLALCHEMY_AVAILABLE = False
assert not mcpgateway.cache.session_registry.SQLALCHEMY_AVAILABLE
@@ -53,7 +53,7 @@ class TestNoneBackend:
async def test_none_backend_initialization_logging(self, caplog):
"""Test that 'none' backend logs initialization message."""
registry = SessionRegistry(backend="none")
-
+
# Check that initialization message is logged
assert "Session registry initialized with 'none' backend - session tracking disabled" in caplog.text
@@ -61,10 +61,10 @@ async def test_none_backend_initialization_logging(self, caplog):
async def test_none_backend_initialize_method(self):
"""Test 'none' backend initialize method does nothing."""
registry = SessionRegistry(backend="none")
-
+
# Should not raise any errors
await registry.initialize()
-
+
# No cleanup task should be created
assert registry._cleanup_task is None
@@ -78,22 +78,22 @@ async def test_redis_add_session_error(self, monkeypatch, caplog):
mock_redis = AsyncMock()
mock_redis.setex = AsyncMock(side_effect=Exception("Redis connection error"))
mock_redis.publish = AsyncMock()
-
+
with patch('mcpgateway.cache.session_registry.REDIS_AVAILABLE', True):
with patch('mcpgateway.cache.session_registry.Redis') as MockRedis:
MockRedis.from_url.return_value = mock_redis
-
+
registry = SessionRegistry(backend="redis", redis_url="redis://localhost")
-
+
class DummyTransport:
async def disconnect(self):
pass
async def is_connected(self):
return True
-
+
transport = DummyTransport()
await registry.add_session("test_session", transport)
-
+
# Should log the Redis error
assert "Redis error adding session test_session: Redis connection error" in caplog.text
@@ -102,15 +102,15 @@ async def test_redis_broadcast_error(self, monkeypatch, caplog):
"""Test Redis error during broadcast."""
mock_redis = AsyncMock()
mock_redis.publish = AsyncMock(side_effect=Exception("Redis publish error"))
-
+
with patch('mcpgateway.cache.session_registry.REDIS_AVAILABLE', True):
with patch('mcpgateway.cache.session_registry.Redis') as MockRedis:
MockRedis.from_url.return_value = mock_redis
-
+
registry = SessionRegistry(backend="redis", redis_url="redis://localhost")
-
+
await registry.broadcast("test_session", {"test": "message"})
-
+
# Should log the Redis error
assert "Redis error during broadcast: Redis publish error" in caplog.text
@@ -118,7 +118,7 @@ async def test_redis_broadcast_error(self, monkeypatch, caplog):
class TestDatabaseBackendErrors:
"""Test database backend error scenarios."""
- @pytest.mark.asyncio
+ @pytest.mark.asyncio
async def test_database_add_session_error(self, monkeypatch, caplog):
"""Test database error during add_session."""
def mock_get_db():
@@ -127,24 +127,24 @@ def mock_get_db():
mock_session.rollback = Mock()
mock_session.close = Mock()
yield mock_session
-
+
with patch('mcpgateway.cache.session_registry.SQLALCHEMY_AVAILABLE', True):
with patch('mcpgateway.cache.session_registry.get_db', mock_get_db):
with patch('asyncio.to_thread') as mock_to_thread:
# Simulate the database error being raised from the thread
mock_to_thread.side_effect = Exception("Database connection error")
-
+
registry = SessionRegistry(backend="database", database_url="sqlite:///test.db")
-
+
class DummyTransport:
async def disconnect(self):
pass
async def is_connected(self):
return True
-
+
transport = DummyTransport()
await registry.add_session("test_session", transport)
-
+
# Should log the database error
assert "Database error adding session test_session: Database connection error" in caplog.text
@@ -157,17 +157,17 @@ def mock_get_db():
mock_session.rollback = Mock()
mock_session.close = Mock()
yield mock_session
-
+
with patch('mcpgateway.cache.session_registry.SQLALCHEMY_AVAILABLE', True):
with patch('mcpgateway.cache.session_registry.get_db', mock_get_db):
with patch('asyncio.to_thread') as mock_to_thread:
# Simulate the database error being raised from the thread
mock_to_thread.side_effect = Exception("Database broadcast error")
-
+
registry = SessionRegistry(backend="database", database_url="sqlite:///test.db")
-
+
await registry.broadcast("test_session", {"test": "message"})
-
+
# Should log the database error
assert "Database error during broadcast: Database broadcast error" in caplog.text
@@ -180,16 +180,16 @@ async def test_memory_backend_initialization_logging(self, caplog):
"""Test memory backend initialization creates cleanup task."""
registry = SessionRegistry(backend="memory")
await registry.initialize()
-
+
try:
# Should log initialization
assert "Initializing session registry with backend: memory" in caplog.text
assert "Memory cleanup task started" in caplog.text
-
+
# Should have created cleanup task
assert registry._cleanup_task is not None
assert not registry._cleanup_task.done()
-
+
finally:
await registry.shutdown()
@@ -199,16 +199,16 @@ async def test_database_backend_initialization_logging(self, caplog):
with patch('mcpgateway.cache.session_registry.SQLALCHEMY_AVAILABLE', True):
registry = SessionRegistry(backend="database", database_url="sqlite:///test.db")
await registry.initialize()
-
+
try:
- # Should log initialization
+ # Should log initialization
assert "Initializing session registry with backend: database" in caplog.text
assert "Database cleanup task started" in caplog.text
-
+
# Should have created cleanup task
assert registry._cleanup_task is not None
assert not registry._cleanup_task.done()
-
+
finally:
await registry.shutdown()
@@ -218,18 +218,18 @@ async def test_redis_initialization_subscribe(self, monkeypatch):
mock_redis = AsyncMock()
mock_pubsub = AsyncMock()
mock_redis.pubsub = Mock(return_value=mock_pubsub) # Use Mock for sync method
-
+
with patch('mcpgateway.cache.session_registry.REDIS_AVAILABLE', True):
with patch('mcpgateway.cache.session_registry.Redis') as MockRedis:
MockRedis.from_url.return_value = mock_redis
-
+
registry = SessionRegistry(backend="redis", redis_url="redis://localhost")
await registry.initialize()
-
+
try:
# Should have subscribed to events channel
mock_pubsub.subscribe.assert_called_once_with("mcp_session_events")
-
+
finally:
await registry.shutdown()
@@ -238,12 +238,12 @@ async def test_shutdown_cancels_cleanup_task(self):
"""Test shutdown properly cancels cleanup tasks."""
registry = SessionRegistry(backend="memory")
await registry.initialize()
-
+
original_task = registry._cleanup_task
assert not original_task.cancelled()
-
+
await registry.shutdown()
-
+
# Task should be cancelled
assert original_task.cancelled()
@@ -252,9 +252,9 @@ async def test_shutdown_handles_already_cancelled_task(self):
"""Test shutdown handles already cancelled cleanup task."""
registry = SessionRegistry(backend="memory")
await registry.initialize()
-
+
# Cancel task before shutdown
registry._cleanup_task.cancel()
-
+
# Shutdown should not raise error
- await registry.shutdown()
\ No newline at end of file
+ await registry.shutdown()
diff --git a/tests/unit/mcpgateway/services/test_resource_service.py b/tests/unit/mcpgateway/services/test_resource_service.py
index 1504d43c..b4da0575 100644
--- a/tests/unit/mcpgateway/services/test_resource_service.py
+++ b/tests/unit/mcpgateway/services/test_resource_service.py
@@ -1298,5 +1298,128 @@ async def test_update_resource_error(self, resource_service, mock_db, mock_resou
mock_db.rollback.assert_called_once()
+class TestResourceServiceMetricsExtended:
+ """Extended tests for resource service metrics."""
+
+ @pytest.mark.asyncio
+ async def test_list_resources_with_tags(self, resource_service, mock_db, mock_resource):
+ """Test listing resources with tag filtering."""
+ from sqlalchemy import func
+
+ # Mock query chain
+ mock_query = MagicMock()
+ mock_query.where.return_value = mock_query
+ mock_db.execute.return_value.scalars.return_value.all.return_value = [mock_resource]
+
+ with patch("mcpgateway.services.resource_service.select", return_value=mock_query):
+ with patch("mcpgateway.services.resource_service.func") as mock_func:
+ mock_func.json_contains.return_value = MagicMock()
+ mock_func.or_.return_value = MagicMock()
+
+ result = await resource_service.list_resources(
+ mock_db, tags=["test", "production"]
+ )
+
+ # Verify tag filtering was applied
+ assert mock_func.json_contains.call_count == 2
+ mock_func.or_.assert_called_once()
+ assert len(result) == 1
+
+ @pytest.mark.asyncio
+ async def test_subscribe_events_with_uri(self, resource_service):
+ """Test subscribing to events for specific URI."""
+ test_uri = "test://resource"
+ test_event = {"type": "resource_updated", "data": {"uri": test_uri}}
+
+ # Start subscription
+ subscriber = resource_service.subscribe_events(uri=test_uri)
+ subscription_task = asyncio.create_task(subscriber.__anext__())
+
+ # Allow subscription to register
+ await asyncio.sleep(0.01)
+
+ # Publish event to specific URI
+ await resource_service._publish_event(test_uri, test_event)
+
+ # Receive event
+ received = await asyncio.wait_for(subscription_task, timeout=0.1)
+ assert received == test_event
+
+ # Clean up
+ await subscriber.aclose()
+
+ # Verify cleanup
+ assert test_uri not in resource_service._event_subscribers
+
+ @pytest.mark.asyncio
+ async def test_subscribe_events_global(self, resource_service):
+ """Test subscribing to all events."""
+ test_event = {"type": "resource_created", "data": {"uri": "any://resource"}}
+
+ # Start global subscription
+ subscriber = resource_service.subscribe_events(uri=None)
+ subscription_task = asyncio.create_task(subscriber.__anext__())
+
+ await asyncio.sleep(0.01)
+
+ # Publish event to any URI
+ await resource_service._publish_event("any://resource", test_event)
+
+ received = await asyncio.wait_for(subscription_task, timeout=0.1)
+ assert received == test_event
+
+ await subscriber.aclose()
+
+ # Verify cleanup of global subscribers
+ assert "*" not in resource_service._event_subscribers
+
+ @pytest.mark.asyncio
+ async def test_read_template_resource_not_found(self, resource_service):
+ """Test reading template resource that doesn't exist."""
+ with pytest.raises(ResourceNotFoundError, match="No template matches URI"):
+ await resource_service._read_template_resource("template://nonexistent/{id}")
+
+ @pytest.mark.asyncio
+ async def test_get_top_resources(self, resource_service, mock_db):
+ """Test getting top performing resources."""
+ # Mock query results
+ mock_result1 = MagicMock()
+ mock_result1.id = 1
+ mock_result1.name = "resource1"
+ mock_result1.execution_count = 10
+ mock_result1.avg_response_time = 1.5
+ mock_result1.success_rate = 100.0
+ mock_result1.last_execution = "2025-01-10T12:00:00"
+
+ mock_result2 = MagicMock()
+ mock_result2.id = 2
+ mock_result2.name = "resource2"
+ mock_result2.execution_count = 7
+ mock_result2.avg_response_time = 2.3
+ mock_result2.success_rate = 71.43
+ mock_result2.last_execution = "2025-01-10T11:00:00"
+
+ # Mock the query chain
+ mock_query = MagicMock()
+ mock_query.outerjoin.return_value = mock_query
+ mock_query.group_by.return_value = mock_query
+ mock_query.order_by.return_value = mock_query
+ mock_query.limit.return_value = mock_query
+ mock_query.all.return_value = [mock_result1, mock_result2]
+
+ mock_db.query.return_value = mock_query
+
+ result = await resource_service.get_top_resources(mock_db, limit=2)
+
+ assert len(result) == 2
+ assert result[0].name == "resource1"
+ assert result[0].execution_count == 10
+ assert result[0].success_rate == 100.0
+
+ assert result[1].name == "resource2"
+ assert result[1].execution_count == 7
+ assert result[1].success_rate == pytest.approx(71.43, rel=0.01)
+
+
if __name__ == "__main__":
pytest.main([__file__, "-v"])
diff --git a/tests/unit/mcpgateway/services/test_tool_service.py b/tests/unit/mcpgateway/services/test_tool_service.py
index 511558f3..db8158ce 100644
--- a/tests/unit/mcpgateway/services/test_tool_service.py
+++ b/tests/unit/mcpgateway/services/test_tool_service.py
@@ -29,6 +29,7 @@
ToolNotFoundError,
ToolResult,
ToolService,
+ ToolValidationError,
)
from mcpgateway.utils.services_auth import encode_auth
@@ -324,13 +325,13 @@ async def test_register_tool_name_conflict(self, tool_service, mock_tool, test_d
request_type="POST",
)
- # Should raise IntegrityError due to UNIQUE constraint failure
+ # Should raise ToolError due to UNIQUE constraint failure (wrapped IntegrityError)
test_db.commit = Mock(side_effect=IntegrityError("UNIQUE constraint failed: tools.name", None, None))
- with pytest.raises(IntegrityError) as exc_info:
+ with pytest.raises(ToolError) as exc_info:
await tool_service.register_tool(test_db, tool_create)
- # Check the error message for UNIQUE constraint failure
- assert "UNIQUE constraint failed: tools.name" in str(exc_info.value)
+ # Check the error message for tool name conflict
+ assert "Tool already exists: test_tool" in str(exc_info.value)
@pytest.mark.asyncio
async def test_register_inactive_tool_name_conflict(self, tool_service, mock_tool, test_db):
@@ -350,13 +351,13 @@ async def test_register_inactive_tool_name_conflict(self, tool_service, mock_too
request_type="POST",
)
- # Should raise IntegrityError due to UNIQUE constraint failure
+ # Should raise ToolError due to UNIQUE constraint failure (wrapped IntegrityError)
test_db.commit = Mock(side_effect=IntegrityError("UNIQUE constraint failed: tools.name", None, None))
- with pytest.raises(IntegrityError) as exc_info:
+ with pytest.raises(ToolError) as exc_info:
await tool_service.register_tool(test_db, tool_create)
- # Check the error message for UNIQUE constraint failure
- assert "UNIQUE constraint failed: tools.name" in str(exc_info.value)
+ # Check the error message for tool name conflict
+ assert "Tool already exists: test_tool" in str(exc_info.value)
@pytest.mark.asyncio
async def test_register_tool_db_integrity_error(self, tool_service, test_db):
@@ -378,13 +379,13 @@ async def test_register_tool_db_integrity_error(self, tool_service, test_db):
request_type="POST",
)
- # Should raise IntegrityError
- with pytest.raises(IntegrityError) as exc_info:
+ # Should raise ToolError (wrapped IntegrityError)
+ with pytest.raises(ToolError) as exc_info:
await tool_service.register_tool(test_db, tool_create)
- # After exception, rollback should be called
- test_db.rollback.assert_called_once()
- assert "orig" in str(exc_info.value)
+ # Verify rollback was called
+ test_db.rollback.assert_called_once()
+ assert "Tool already exists: test_tool" in str(exc_info.value)
@pytest.mark.asyncio
async def test_list_tools(self, tool_service, mock_tool, test_db):
@@ -1650,3 +1651,220 @@ async def test_reset_metrics(self, tool_service, test_db):
# Verify DB operations with tool_id
test_db.execute.assert_called_once()
test_db.commit.assert_called_once()
+
+ async def test_record_tool_metric(self, tool_service, mock_tool):
+ """Test recording tool invocation metrics."""
+ # Set up test data
+ start_time = 100.0
+ success = True
+ error_message = None
+
+ # Mock database
+ mock_db = MagicMock()
+
+ # Mock time.monotonic to return a consistent value
+ with patch("mcpgateway.services.tool_service.time.monotonic", return_value=105.0):
+ # Mock ToolMetric class
+ with patch("mcpgateway.services.tool_service.ToolMetric") as MockToolMetric:
+ mock_metric_instance = MagicMock()
+ MockToolMetric.return_value = mock_metric_instance
+
+ # Call the method
+ await tool_service._record_tool_metric(mock_db, mock_tool, start_time, success, error_message)
+
+ # Verify ToolMetric was created with correct data
+ MockToolMetric.assert_called_once_with(
+ tool_id=mock_tool.id,
+ response_time=5.0, # 105.0 - 100.0
+ is_success=True,
+ error_message=None
+ )
+
+ # Verify DB operations
+ mock_db.add.assert_called_once_with(mock_metric_instance)
+ mock_db.commit.assert_called_once()
+
+ async def test_record_tool_metric_with_error(self, tool_service, mock_tool):
+ """Test recording tool invocation metrics with error."""
+ start_time = 100.0
+ success = False
+ error_message = "Connection timeout"
+
+ # Mock database
+ mock_db = MagicMock()
+
+ with patch("mcpgateway.services.tool_service.time.monotonic", return_value=102.5):
+ with patch("mcpgateway.services.tool_service.ToolMetric") as MockToolMetric:
+ mock_metric_instance = MagicMock()
+ MockToolMetric.return_value = mock_metric_instance
+
+ await tool_service._record_tool_metric(mock_db, mock_tool, start_time, success, error_message)
+
+ # Verify ToolMetric was created with error data
+ MockToolMetric.assert_called_once_with(
+ tool_id=mock_tool.id,
+ response_time=2.5,
+ is_success=False,
+ error_message="Connection timeout"
+ )
+
+ mock_db.add.assert_called_once_with(mock_metric_instance)
+ mock_db.commit.assert_called_once()
+
+ async def test_aggregate_metrics(self, tool_service):
+ """Test aggregating metrics across all tools."""
+ # Mock database
+ mock_db = MagicMock()
+
+ # Create a mock that returns scalar values
+ mock_execute_result = MagicMock()
+ mock_execute_result.scalar.side_effect = [
+ 10, # total count
+ 8, # successful count
+ 2, # failed count
+ 0.5, # min response time
+ 5.0, # max response time
+ 2.3, # avg response time
+ "2025-01-10T12:00:00" # last execution time
+ ]
+ mock_db.execute.return_value = mock_execute_result
+
+ result = await tool_service.aggregate_metrics(mock_db)
+
+ assert result == {
+ "total_executions": 10,
+ "successful_executions": 8,
+ "failed_executions": 2,
+ "failure_rate": 0.2, # 2/10
+ "min_response_time": 0.5,
+ "max_response_time": 5.0,
+ "avg_response_time": 2.3,
+ "last_execution_time": "2025-01-10T12:00:00"
+ }
+
+ # Verify all expected queries were made
+ assert mock_db.execute.call_count == 7
+
+ async def test_aggregate_metrics_no_data(self, tool_service):
+ """Test aggregating metrics when no data exists."""
+ # Mock database
+ mock_db = MagicMock()
+
+ # Create a mock that returns scalar values
+ mock_execute_result = MagicMock()
+ mock_execute_result.scalar.side_effect = [
+ 0, # total count
+ 0, # successful count
+ 0, # failed count
+ None, # min response time
+ None, # max response time
+ None, # avg response time
+ None # last execution time
+ ]
+ mock_db.execute.return_value = mock_execute_result
+
+ result = await tool_service.aggregate_metrics(mock_db)
+
+ assert result == {
+ "total_executions": 0,
+ "successful_executions": 0,
+ "failed_executions": 0,
+ "failure_rate": 0.0,
+ "min_response_time": None,
+ "max_response_time": None,
+ "avg_response_time": None,
+ "last_execution_time": None
+ }
+
+ async def test_validate_tool_url_success(self, tool_service):
+ """Test successful tool URL validation."""
+ # Mock successful HTTP response
+ mock_response = MagicMock()
+ mock_response.raise_for_status = MagicMock()
+ tool_service._http_client.get.return_value = mock_response
+
+ # Should not raise any exception
+ await tool_service._validate_tool_url("http://example.com/tool")
+
+ tool_service._http_client.get.assert_called_once_with("http://example.com/tool")
+ mock_response.raise_for_status.assert_called_once()
+
+ async def test_validate_tool_url_failure(self, tool_service):
+ """Test tool URL validation failure."""
+ # Mock HTTP error
+ tool_service._http_client.get.side_effect = Exception("Connection refused")
+
+ with pytest.raises(ToolValidationError, match="Failed to validate tool URL: Connection refused"):
+ await tool_service._validate_tool_url("http://example.com/tool")
+
+ async def test_check_tool_health_success(self, tool_service, mock_tool):
+ """Test successful tool health check."""
+ mock_response = MagicMock()
+ mock_response.is_success = True
+ tool_service._http_client.get.return_value = mock_response
+
+ result = await tool_service._check_tool_health(mock_tool)
+
+ assert result is True
+ tool_service._http_client.get.assert_called_once_with(mock_tool.url)
+
+ async def test_check_tool_health_failure(self, tool_service, mock_tool):
+ """Test failed tool health check."""
+ mock_response = MagicMock()
+ mock_response.is_success = False
+ tool_service._http_client.get.return_value = mock_response
+
+ result = await tool_service._check_tool_health(mock_tool)
+
+ assert result is False
+
+ async def test_check_tool_health_exception(self, tool_service, mock_tool):
+ """Test tool health check with exception."""
+ tool_service._http_client.get.side_effect = Exception("Network error")
+
+ result = await tool_service._check_tool_health(mock_tool)
+
+ assert result is False
+
+ async def test_subscribe_events(self, tool_service):
+ """Test event subscription mechanism."""
+ # Create an event to publish
+ test_event = {"type": "test_event", "data": {"id": 1}}
+
+ # Start subscription in background
+ subscriber = tool_service.subscribe_events()
+ subscription_task = asyncio.create_task(subscriber.__anext__())
+
+ # Give a moment for subscription to be registered
+ await asyncio.sleep(0.01)
+
+ # Publish event
+ await tool_service._publish_event(test_event)
+
+ # Get the event
+ received_event = await subscription_task
+ assert received_event == test_event
+
+ # Clean up
+ await subscriber.aclose()
+
+ async def test_notify_tool_added(self, tool_service, mock_tool):
+ """Test notification when tool is added."""
+ with patch.object(tool_service, '_publish_event', new_callable=AsyncMock) as mock_publish:
+ await tool_service._notify_tool_added(mock_tool)
+
+ mock_publish.assert_called_once()
+ event = mock_publish.call_args[0][0]
+ assert event["type"] == "tool_added"
+ assert event["data"]["id"] == mock_tool.id
+ assert event["data"]["name"] == mock_tool.name
+
+ async def test_notify_tool_removed(self, tool_service, mock_tool):
+ """Test notification when tool is removed."""
+ with patch.object(tool_service, '_publish_event', new_callable=AsyncMock) as mock_publish:
+ await tool_service._notify_tool_removed(mock_tool)
+
+ mock_publish.assert_called_once()
+ event = mock_publish.call_args[0][0]
+ assert event["type"] == "tool_removed"
+ assert event["data"]["id"] == mock_tool.id
diff --git a/tests/unit/mcpgateway/test_admin.py b/tests/unit/mcpgateway/test_admin.py
index d711caa6..af156548 100644
--- a/tests/unit/mcpgateway/test_admin.py
+++ b/tests/unit/mcpgateway/test_admin.py
@@ -41,7 +41,8 @@
admin_edit_server,
admin_edit_tool,
admin_get_gateway,
- admin_get_metrics,
+ # admin_get_metrics,
+ get_aggregated_metrics,
admin_get_prompt,
admin_get_resource,
admin_get_server,
@@ -997,7 +998,11 @@ class TestAdminMetricsRoutes:
@patch.object(ResourceService, "aggregate_metrics", new_callable=AsyncMock)
@patch.object(ServerService, "aggregate_metrics", new_callable=AsyncMock)
@patch.object(PromptService, "aggregate_metrics", new_callable=AsyncMock)
- async def test_admin_get_metrics_with_nulls(self, mock_prompt_metrics, mock_server_metrics, mock_resource_metrics, mock_tool_metrics, mock_db):
+ @patch.object(ToolService, "get_top_tools", new_callable=AsyncMock)
+ @patch.object(ResourceService, "get_top_resources", new_callable=AsyncMock)
+ @patch.object(ServerService, "get_top_servers", new_callable=AsyncMock)
+ @patch.object(PromptService, "get_top_prompts", new_callable=AsyncMock)
+ async def test_admin_get_metrics_with_nulls(self, mock_prompt_top, mock_server_top, mock_resource_top, mock_tool_top, mock_prompt_metrics, mock_server_metrics, mock_resource_metrics, mock_tool_metrics, mock_db):
"""Test getting metrics with null values."""
# Some services return metrics with null values
mock_tool_metrics.return_value = ToolMetrics(
@@ -1025,12 +1030,23 @@ async def test_admin_get_metrics_with_nulls(self, mock_prompt_metrics, mock_serv
mock_server_metrics.return_value = None # No metrics available
mock_prompt_metrics.return_value = None
- result = await admin_get_metrics(mock_db, "test-user")
+ # Mock top performers to return empty lists
+ mock_tool_top.return_value = []
+ mock_resource_top.return_value = []
+ mock_server_top.return_value = []
+ mock_prompt_top.return_value = []
+
+ # result = await admin_get_metrics(mock_db, "test-user")
+ result = await get_aggregated_metrics(mock_db)
assert result["tools"].total_executions == 0
assert result["resources"].total_executions == 100
assert result["servers"] is None
assert result["prompts"] is None
+ # Check that topPerformers structure exists
+ assert "topPerformers" in result
+ assert result["topPerformers"]["tools"] == []
+ assert result["topPerformers"]["resources"] == []
@patch.object(ToolService, "reset_metrics", new_callable=AsyncMock)
@patch.object(ResourceService, "reset_metrics", new_callable=AsyncMock)