From cc556d9993a67e57632a849ffdecc1f8f6c8e56c Mon Sep 17 00:00:00 2001 From: IrushaBasukala Date: Sat, 2 Aug 2025 18:57:57 +0545 Subject: [PATCH 01/11] enhance metrics tab ui Signed-off-by: IrushaBasukala --- alembic.ini | 147 +++++ .../alembic/README.md => alembic/README | 4 +- alembic/env.py | 187 ++++++ .../alembic => alembic}/script.py.mako | 0 .../b77ca9d2de7e_uuid_pk_and_slug_refactor.py | 552 ++++++++++++++++++ .../e4fc04d1a442_add_annotations_to_tables.py | 2 +- ...490e949b1_add_improved_status_to_tables.py | 2 +- mcpgateway/admin.py | 163 ++---- mcpgateway/handlers/sampling.py | 2 +- mcpgateway/migrations/env.py | 78 +++ mcpgateway/schemas.py | 9 + mcpgateway/services/gateway_service.py | 3 + mcpgateway/services/prompt_service.py | 32 +- mcpgateway/services/resource_service.py | 30 +- mcpgateway/services/server_service.py | 33 +- mcpgateway/services/tool_service.py | 39 +- mcpgateway/static/admin.js | 381 ++++++++++-- 17 files changed, 1501 insertions(+), 163 deletions(-) create mode 100644 alembic.ini rename mcpgateway/alembic/README.md => alembic/README (98%) create mode 100644 alembic/env.py rename {mcpgateway/alembic => alembic}/script.py.mako (100%) create mode 100644 alembic/versions/b77ca9d2de7e_uuid_pk_and_slug_refactor.py rename {mcpgateway/alembic => alembic}/versions/e4fc04d1a442_add_annotations_to_tables.py (97%) rename {mcpgateway/alembic => alembic}/versions/e75490e949b1_add_improved_status_to_tables.py (97%) create mode 100644 mcpgateway/migrations/env.py diff --git a/alembic.ini b/alembic.ini new file mode 100644 index 00000000..07918167 --- /dev/null +++ b/alembic.ini @@ -0,0 +1,147 @@ +# A generic, single database configuration. + +[alembic] +# path to migration scripts. +# this is typically a path given in POSIX (e.g. 
forward slashes) +# format, relative to the token %(here)s which refers to the location of this +# ini file +script_location = %(here)s/alembic + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s + +# sys.path path, will be prepended to sys.path if present. +# defaults to the current working directory. for multiple paths, the path separator +# is defined by "path_separator" below. +prepend_sys_path = . + + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library. +# Any required deps can installed by adding `alembic[tz]` to the pip requirements +# string value is passed to ZoneInfo() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to /versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# The path separator used here should be the separator specified by "path_separator" +# below. 
+# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions + +# path_separator; This indicates what character is used to split lists of file +# paths, including version_locations and prepend_sys_path within configparser +# files such as alembic.ini. +# The default rendered in new alembic.ini files is "os", which uses os.pathsep +# to provide os-dependent path splitting. +# +# Note that in order to support legacy alembic.ini files, this default does NOT +# take place if path_separator is not present in alembic.ini. If this +# option is omitted entirely, fallback logic is as follows: +# +# 1. Parsing of the version_locations option falls back to using the legacy +# "version_path_separator" key, which if absent then falls back to the legacy +# behavior of splitting on spaces and/or commas. +# 2. Parsing of the prepend_sys_path option falls back to the legacy +# behavior of splitting on spaces, commas, or colons. +# +# Valid values for path_separator are: +# +# path_separator = : +# path_separator = ; +# path_separator = space +# path_separator = newline +# +# Use os.pathsep. Default configuration used for new projects. +path_separator = os + +# set to 'true' to search source files recursively +# in each "version_locations" directory +# new in Alembic version 1.10 +# recursive_version_locations = false + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + +# database URL. This is consumed by the user-maintained env.py script only. +# other means of configuring database URLs may be customized within the env.py +# file. +sqlalchemy.url = driver://user:pass@localhost/dbname + + +[post_write_hooks] +# post_write_hooks defines scripts or Python functions that are run +# on newly generated revision scripts. 
See the documentation for further +# detail and examples + +# format using "black" - use the console_scripts runner, against the "black" entrypoint +# hooks = black +# black.type = console_scripts +# black.entrypoint = black +# black.options = -l 79 REVISION_SCRIPT_FILENAME + +# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module +# hooks = ruff +# ruff.type = module +# ruff.module = ruff +# ruff.options = check --fix REVISION_SCRIPT_FILENAME + +# Alternatively, use the exec runner to execute a binary found on your PATH +# hooks = ruff +# ruff.type = exec +# ruff.executable = ruff +# ruff.options = check --fix REVISION_SCRIPT_FILENAME + +# Logging configuration. This is also consumed by the user-maintained +# env.py script only. +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARNING +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARNING +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/mcpgateway/alembic/README.md b/alembic/README similarity index 98% rename from mcpgateway/alembic/README.md rename to alembic/README index 00219a1a..1738eac7 100644 --- a/mcpgateway/alembic/README.md +++ b/alembic/README @@ -1,4 +1,4 @@ -# Alembic Migration Guide for `mcpgateway` +Generic single-database configuration.# Alembic Migration Guide for `mcpgateway` > Creating, applying, and managing schema migrations with Alembic. 
@@ -169,4 +169,4 @@ make db-history * Official docs: [https://alembic.sqlalchemy.org](https://alembic.sqlalchemy.org) * Autogenerate docs: [https://alembic.sqlalchemy.org/en/latest/autogenerate.html](https://alembic.sqlalchemy.org/en/latest/autogenerate.html) ---- +--- \ No newline at end of file diff --git a/alembic/env.py b/alembic/env.py new file mode 100644 index 00000000..ff5e0600 --- /dev/null +++ b/alembic/env.py @@ -0,0 +1,187 @@ +# -*- coding: utf-8 -*- +"""Alembic environment configuration for database migrations. + +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti, Madhav Kandukuri + +This module configures the Alembic migration environment for the MCP Gateway +application. It sets up both offline and online migration modes, configures +logging, and establishes the database connection parameters. + +The module performs the following key functions: +- Configures Alembic to locate migration scripts in the mcpgateway package +- Sets up Python logging based on the alembic.ini configuration +- Imports the SQLAlchemy metadata from the application models +- Configures the database URL from application settings +- Provides functions for running migrations in both offline and online modes + +Offline mode generates SQL scripts without connecting to the database, while +online mode executes migrations directly against a live database connection. + +Attributes: + config (Config): The Alembic configuration object loaded from alembic.ini. + target_metadata (MetaData): SQLAlchemy metadata object containing all + table definitions from the application models. + +Examples: + Running migrations in offline mode:: + + alembic upgrade head --sql + + Running migrations in online mode:: + + alembic upgrade head + + The module is typically not imported directly but is used by Alembic + when executing migration commands. + +Note: + This file is automatically executed by Alembic and should not be + imported or run directly by application code. 
+""" + +# Standard +from importlib.resources import files +from logging.config import fileConfig + +# Third-Party +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +from alembic.config import Config +from sqlalchemy import engine_from_config, pool + +# First-Party +from mcpgateway.config import settings +from mcpgateway.db import Base + +# from mcpgateway.db import get_metadata +# target_metadata = get_metadata() + + +# Create config object - this is the standard way in Alembic +config = getattr(context, "config", None) or Config() + + +def _inside_alembic() -> bool: + """Detect if this module is being executed by the Alembic CLI. + + This function checks whether the current execution context is within + an Alembic migration environment. It's used to prevent migration code + from running when this module is imported for other purposes (e.g., + during testing or when importing models). + + The detection works by checking for the presence of the '_proxy' attribute + on the alembic.context object. This attribute is set internally by Alembic + when it loads and executes the env.py file during migration operations. + + Returns: + bool: True if running under Alembic CLI (e.g., during 'alembic upgrade', + 'alembic downgrade', etc.), False if imported normally by Python + code or during testing. + + Examples: + When running migrations:: + + $ alembic upgrade head + # _inside_alembic() returns True + + When importing in tests or application code:: + + from mcpgateway.alembic.env import target_metadata + # _inside_alembic() returns False + + Note: + This guard is crucial to prevent the migration execution code at the + bottom of this module from running during normal imports. Without it, + importing this module would attempt to run migrations every time. 
+ """ + return getattr(context, "_proxy", None) is not None + + +config.set_main_option("script_location", str(files("mcpgateway").joinpath("alembic"))) + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +if config.config_file_name is not None: + fileConfig( + config.config_file_name, + disable_existing_loggers=False, + ) + +# First-Party +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel + +target_metadata = Base.metadata + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + +config.set_main_option( + "sqlalchemy.url", + settings.database_url, +) + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. 
+ + """ + connection = config.attributes.get("connection") + if connection is None: + connectable = engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + with connectable.connect() as connection: + context.configure(connection=connection, target_metadata=target_metadata) + + with context.begin_transaction(): + context.run_migrations() + else: + context.configure(connection=connection, target_metadata=target_metadata) + + with context.begin_transaction(): + context.run_migrations() + + +if _inside_alembic(): + if context.is_offline_mode(): + run_migrations_offline() + else: + run_migrations_online() \ No newline at end of file diff --git a/mcpgateway/alembic/script.py.mako b/alembic/script.py.mako similarity index 100% rename from mcpgateway/alembic/script.py.mako rename to alembic/script.py.mako diff --git a/alembic/versions/b77ca9d2de7e_uuid_pk_and_slug_refactor.py b/alembic/versions/b77ca9d2de7e_uuid_pk_and_slug_refactor.py new file mode 100644 index 00000000..6d4edabc --- /dev/null +++ b/alembic/versions/b77ca9d2de7e_uuid_pk_and_slug_refactor.py @@ -0,0 +1,552 @@ +# -*- coding: utf-8 -*- +"""uuid-pk_and_slug_refactor + +Revision ID: b77ca9d2de7e +Revises: +Create Date: 2025-06-26 21:29:59.117140 + +""" + +# Standard +from typing import Sequence, Union +import uuid + +# Third-Party +from alembic import op +import sqlalchemy as sa +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.config import settings +from mcpgateway.utils.create_slug import slugify + +# revision identifiers, used by Alembic. 
+revision: str = "b77ca9d2de7e" +down_revision: Union[str, Sequence[str], None] = None +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +# ────────────────────────────────────────────────────────────────────────────── +# Helpers +# ────────────────────────────────────────────────────────────────────────────── +def _use_batch() -> bool: + """Determine if batch operations are required for the current database. + + SQLite requires batch mode for certain ALTER TABLE operations like dropping + columns or altering column types. This helper checks the database dialect + to determine if batch operations should be used. + + Returns: + bool: True if the database is SQLite (requires batch mode), False otherwise. + + Examples: + >>> # In a SQLite context + >>> _use_batch() # doctest: +SKIP + True + >>> # In a PostgreSQL context + >>> _use_batch() # doctest: +SKIP + False + """ + return op.get_bind().dialect.name == "sqlite" + + +# ────────────────────────────────────────────────────────────────────────────── +# Upgrade +# ────────────────────────────────────────────────────────────────────────────── +def upgrade() -> None: + """Migrate database schema from integer to UUID primary keys with slugs. 
+ + This migration performs a comprehensive schema transformation in three stages: + + Stage 1 - Add placeholder columns: + - Adds UUID columns (id_new) to gateways, tools, and servers + - Adds slug columns for human-readable identifiers + - Adds columns to preserve original tool names before prefixing + + Stage 2 - Data migration: + - Generates UUIDs for all primary keys + - Creates slugs from names (e.g., "My Gateway" -> "my-gateway") + - Prefixes tool names with gateway slugs (e.g., "my-tool" -> "gateway-slug-my-tool") + - Updates all foreign key references to use new UUIDs + + Stage 3 - Schema finalization: + - Drops old integer columns + - Renames new UUID columns to replace old ones + - Recreates primary keys and foreign key constraints + - Adds unique constraints on slugs and URLs + + The migration is designed to work with both SQLite (using batch operations) + and other databases. It preserves all existing data relationships while + transforming the schema. + + Note: + - Skips migration if database is fresh (no gateways table) + - Uses batch operations for SQLite compatibility + - Commits data changes before schema alterations + + Examples: + >>> # Running the migration + >>> upgrade() # doctest: +SKIP + Fresh database detected. Skipping migration. + >>> # Or for existing database + >>> upgrade() # doctest: +SKIP + Existing installation detected. Starting data and schema migration... + """ + bind = op.get_bind() + sess = Session(bind=bind) + inspector = sa.inspect(bind) + + if not inspector.has_table("gateways"): + print("Fresh database detected. Skipping migration.") + return + + print("Existing installation detected. 
Starting data and schema migration...") + + # ── STAGE 1: ADD NEW NULLABLE COLUMNS AS PLACEHOLDERS ───────────────── + op.add_column("gateways", sa.Column("slug", sa.String(), nullable=True)) + op.add_column("gateways", sa.Column("id_new", sa.String(36), nullable=True)) + + op.add_column("tools", sa.Column("id_new", sa.String(36), nullable=True)) + op.add_column("tools", sa.Column("original_name", sa.String(), nullable=True)) + op.add_column("tools", sa.Column("original_name_slug", sa.String(), nullable=True)) + op.add_column("tools", sa.Column("name_new", sa.String(), nullable=True)) + op.add_column("tools", sa.Column("gateway_id_new", sa.String(36), nullable=True)) + + op.add_column("resources", sa.Column("gateway_id_new", sa.String(36), nullable=True)) + op.add_column("prompts", sa.Column("gateway_id_new", sa.String(36), nullable=True)) + + op.add_column("servers", sa.Column("id_new", sa.String(36), nullable=True)) + + op.add_column("server_tool_association", sa.Column("server_id_new", sa.String(36), nullable=True)) + op.add_column("server_tool_association", sa.Column("tool_id_new", sa.String(36), nullable=True)) + + op.add_column("tool_metrics", sa.Column("tool_id_new", sa.String(36), nullable=True)) + op.add_column("server_metrics", sa.Column("server_id_new", sa.String(36), nullable=True)) + op.add_column("server_resource_association", sa.Column("server_id_new", sa.String(36), nullable=True)) + op.add_column("server_prompt_association", sa.Column("server_id_new", sa.String(36), nullable=True)) + + # ── STAGE 2: POPULATE THE NEW COLUMNS (DATA MIGRATION) ─────────────── + gateways = sess.execute(sa.select(sa.text("id, name")).select_from(sa.text("gateways"))).all() + for gid, gname in gateways: + g_uuid = uuid.uuid4().hex + sess.execute( + sa.text("UPDATE gateways SET id_new=:u, slug=:s WHERE id=:i"), + {"u": g_uuid, "s": slugify(gname), "i": gid}, + ) + + tools = sess.execute(sa.select(sa.text("id, name, gateway_id")).select_from(sa.text("tools"))).all() + for 
tid, tname, g_old in tools: + t_uuid = uuid.uuid4().hex + tool_slug = slugify(tname) + sess.execute( + sa.text( + """ + UPDATE tools + SET id_new=:u, + original_name=:on, + original_name_slug=:ons, + name_new = CASE + WHEN :g IS NOT NULL THEN (SELECT slug FROM gateways WHERE id = :g) || :sep || :ons + ELSE :ons + END, + gateway_id_new=(SELECT id_new FROM gateways WHERE id=:g) + WHERE id=:i + """ + ), + { + "u": t_uuid, + "on": tname, + "ons": tool_slug, + "sep": settings.gateway_tool_name_separator, + "g": g_old, + "i": tid, + }, + ) + + servers = sess.execute(sa.select(sa.text("id")).select_from(sa.text("servers"))).all() + for (sid,) in servers: + sess.execute( + sa.text("UPDATE servers SET id_new=:u WHERE id=:i"), + {"u": uuid.uuid4().hex, "i": sid}, + ) + + # Populate all dependent tables + resources = sess.execute(sa.select(sa.text("id, gateway_id")).select_from(sa.text("resources"))).all() + for rid, g_old in resources: + sess.execute(sa.text("UPDATE resources SET gateway_id_new=(SELECT id_new FROM gateways WHERE id=:g) WHERE id=:i"), {"g": g_old, "i": rid}) + prompts = sess.execute(sa.select(sa.text("id, gateway_id")).select_from(sa.text("prompts"))).all() + for pid, g_old in prompts: + sess.execute(sa.text("UPDATE prompts SET gateway_id_new=(SELECT id_new FROM gateways WHERE id=:g) WHERE id=:i"), {"g": g_old, "i": pid}) + sta = sess.execute(sa.select(sa.text("server_id, tool_id")).select_from(sa.text("server_tool_association"))).all() + for s_old, t_old in sta: + sess.execute( + sa.text("UPDATE server_tool_association SET server_id_new=(SELECT id_new FROM servers WHERE id=:s), tool_id_new=(SELECT id_new FROM tools WHERE id=:t) WHERE server_id=:s AND tool_id=:t"), + {"s": s_old, "t": t_old}, + ) + tool_metrics = sess.execute(sa.select(sa.text("id, tool_id")).select_from(sa.text("tool_metrics"))).all() + for tmid, t_old in tool_metrics: + sess.execute(sa.text("UPDATE tool_metrics SET tool_id_new=(SELECT id_new FROM tools WHERE id=:t) WHERE id=:i"), {"t": 
t_old, "i": tmid}) + server_metrics = sess.execute(sa.select(sa.text("id, server_id")).select_from(sa.text("server_metrics"))).all() + for smid, s_old in server_metrics: + sess.execute(sa.text("UPDATE server_metrics SET server_id_new=(SELECT id_new FROM servers WHERE id=:s) WHERE id=:i"), {"s": s_old, "i": smid}) + server_resource_assoc = sess.execute(sa.select(sa.text("server_id, resource_id")).select_from(sa.text("server_resource_association"))).all() + for s_old, r_id in server_resource_assoc: + sess.execute(sa.text("UPDATE server_resource_association SET server_id_new=(SELECT id_new FROM servers WHERE id=:s) WHERE server_id=:s AND resource_id=:r"), {"s": s_old, "r": r_id}) + server_prompt_assoc = sess.execute(sa.select(sa.text("server_id, prompt_id")).select_from(sa.text("server_prompt_association"))).all() + for s_old, p_id in server_prompt_assoc: + sess.execute(sa.text("UPDATE server_prompt_association SET server_id_new=(SELECT id_new FROM servers WHERE id=:s) WHERE server_id=:s AND prompt_id=:p"), {"s": s_old, "p": p_id}) + + sess.commit() + + # ── STAGE 3: FINALIZE SCHEMA (CORRECTED ORDER) ─────────────────────── + # First, rebuild all tables that depend on `servers` and `gateways`. + # This implicitly drops their old foreign key constraints. 
+ with op.batch_alter_table("server_tool_association") as batch_op: + batch_op.drop_column("server_id") + batch_op.drop_column("tool_id") + batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) + batch_op.alter_column("tool_id_new", new_column_name="tool_id", nullable=False) + batch_op.create_primary_key("pk_server_tool_association", ["server_id", "tool_id"]) + + with op.batch_alter_table("server_resource_association") as batch_op: + batch_op.drop_column("server_id") + batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) + + with op.batch_alter_table("server_prompt_association") as batch_op: + batch_op.drop_column("server_id") + batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) + + with op.batch_alter_table("server_metrics") as batch_op: + batch_op.drop_column("server_id") + batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) + + with op.batch_alter_table("tool_metrics") as batch_op: + batch_op.drop_column("tool_id") + batch_op.alter_column("tool_id_new", new_column_name="tool_id", nullable=False) + + with op.batch_alter_table("tools") as batch_op: + batch_op.drop_column("id") + batch_op.alter_column("id_new", new_column_name="id", nullable=False) + batch_op.create_primary_key("pk_tools", ["id"]) + batch_op.drop_column("gateway_id") + batch_op.alter_column("gateway_id_new", new_column_name="gateway_id", nullable=True) + batch_op.drop_column("name") + batch_op.alter_column("name_new", new_column_name="name", nullable=True) + batch_op.alter_column("original_name", nullable=False) + batch_op.alter_column("original_name_slug", nullable=False) + batch_op.create_unique_constraint("uq_tools_name", ["name"]) + batch_op.create_unique_constraint("uq_gateway_id__original_name", ["gateway_id", "original_name"]) + + with op.batch_alter_table("resources") as batch_op: + batch_op.drop_column("gateway_id") + batch_op.alter_column("gateway_id_new", 
new_column_name="gateway_id", nullable=True) + + with op.batch_alter_table("prompts") as batch_op: + batch_op.drop_column("gateway_id") + batch_op.alter_column("gateway_id_new", new_column_name="gateway_id", nullable=True) + + # Second, now that no tables point to their old IDs, rebuild `gateways` and `servers`. + with op.batch_alter_table("gateways") as batch_op: + batch_op.drop_column("id") + batch_op.alter_column("id_new", new_column_name="id", nullable=False) + batch_op.create_primary_key("pk_gateways", ["id"]) + batch_op.alter_column("slug", nullable=False) + batch_op.create_unique_constraint("uq_gateways_slug", ["slug"]) + batch_op.create_unique_constraint("uq_gateways_url", ["url"]) + + with op.batch_alter_table("servers") as batch_op: + batch_op.drop_column("id") + batch_op.alter_column("id_new", new_column_name="id", nullable=False) + batch_op.create_primary_key("pk_servers", ["id"]) + + # Finally, recreate all the foreign key constraints in batch mode for SQLite compatibility. + # The redundant `source_table` argument has been removed from each call. 
+ with op.batch_alter_table("tools") as batch_op: + batch_op.create_foreign_key("fk_tools_gateway_id", "gateways", ["gateway_id"], ["id"]) + with op.batch_alter_table("resources") as batch_op: + batch_op.create_foreign_key("fk_resources_gateway_id", "gateways", ["gateway_id"], ["id"]) + with op.batch_alter_table("prompts") as batch_op: + batch_op.create_foreign_key("fk_prompts_gateway_id", "gateways", ["gateway_id"], ["id"]) + with op.batch_alter_table("server_tool_association") as batch_op: + batch_op.create_foreign_key("fk_server_tool_association_servers", "servers", ["server_id"], ["id"]) + batch_op.create_foreign_key("fk_server_tool_association_tools", "tools", ["tool_id"], ["id"]) + with op.batch_alter_table("tool_metrics") as batch_op: + batch_op.create_foreign_key("fk_tool_metrics_tool_id", "tools", ["tool_id"], ["id"]) + with op.batch_alter_table("server_metrics") as batch_op: + batch_op.create_foreign_key("fk_server_metrics_server_id", "servers", ["server_id"], ["id"]) + with op.batch_alter_table("server_resource_association") as batch_op: + batch_op.create_foreign_key("fk_server_resource_association_server_id", "servers", ["server_id"], ["id"]) + with op.batch_alter_table("server_prompt_association") as batch_op: + batch_op.create_foreign_key("fk_server_prompt_association_server_id", "servers", ["server_id"], ["id"]) + + +# def upgrade() -> None: +# bind = op.get_bind() +# sess = Session(bind=bind) +# inspector = sa.inspect(bind) + +# if not inspector.has_table("gateways"): +# print("Fresh database detected. Skipping migration.") +# return + +# print("Existing installation detected. 
Starting data and schema migration...") + +# # ── STAGE 1: ADD NEW NULLABLE COLUMNS AS PLACEHOLDERS ───────────────── +# op.add_column("gateways", sa.Column("slug", sa.String(), nullable=True)) +# op.add_column("gateways", sa.Column("id_new", sa.String(36), nullable=True)) + +# op.add_column("tools", sa.Column("id_new", sa.String(36), nullable=True)) +# op.add_column("tools", sa.Column("original_name", sa.String(), nullable=True)) +# op.add_column("tools", sa.Column("original_name_slug", sa.String(), nullable=True)) +# op.add_column("tools", sa.Column("name_new", sa.String(), nullable=True)) +# op.add_column("tools", sa.Column("gateway_id_new", sa.String(36), nullable=True)) + +# op.add_column("resources", sa.Column("gateway_id_new", sa.String(36), nullable=True)) +# op.add_column("prompts", sa.Column("gateway_id_new", sa.String(36), nullable=True)) + +# op.add_column("servers", sa.Column("id_new", sa.String(36), nullable=True)) + +# op.add_column("server_tool_association", sa.Column("server_id_new", sa.String(36), nullable=True)) +# op.add_column("server_tool_association", sa.Column("tool_id_new", sa.String(36), nullable=True)) + +# op.add_column("tool_metrics", sa.Column("tool_id_new", sa.String(36), nullable=True)) + +# # Add columns for the new server dependencies +# op.add_column("server_metrics", sa.Column("server_id_new", sa.String(36), nullable=True)) +# op.add_column("server_resource_association", sa.Column("server_id_new", sa.String(36), nullable=True)) +# op.add_column("server_prompt_association", sa.Column("server_id_new", sa.String(36), nullable=True)) + + +# # ── STAGE 2: POPULATE THE NEW COLUMNS (DATA MIGRATION) ─────────────── +# gateways = sess.execute(sa.select(sa.text("id, name")).select_from(sa.text("gateways"))).all() +# for gid, gname in gateways: +# g_uuid = uuid.uuid4().hex +# sess.execute( +# sa.text("UPDATE gateways SET id_new=:u, slug=:s WHERE id=:i"), +# {"u": g_uuid, "s": slugify(gname), "i": gid}, +# ) + +# tools = sess.execute( +# 
sa.select(sa.text("id, name, gateway_id")).select_from(sa.text("tools")) +# ).all() +# for tid, tname, g_old in tools: +# t_uuid = uuid.uuid4().hex +# tool_slug = slugify(tname) +# sess.execute( +# sa.text( +# """ +# UPDATE tools +# SET id_new=:u, +# original_name=:on, +# original_name_slug=:ons, +# name_new = CASE +# WHEN :g IS NOT NULL THEN (SELECT slug FROM gateways WHERE id = :g) || :sep || :ons +# ELSE :ons +# END, +# gateway_id_new=(SELECT id_new FROM gateways WHERE id=:g) +# WHERE id=:i +# """ +# ), +# { +# "u": t_uuid, "on": tname, "ons": tool_slug, +# "sep": settings.gateway_tool_name_separator, "g": g_old, "i": tid, +# }, +# ) + +# servers = sess.execute(sa.select(sa.text("id")).select_from(sa.text("servers"))).all() +# for (sid,) in servers: +# sess.execute( +# sa.text("UPDATE servers SET id_new=:u WHERE id=:i"), +# {"u": uuid.uuid4().hex, "i": sid}, +# ) + +# # Populate all dependent tables +# resources = sess.execute(sa.select(sa.text("id, gateway_id")).select_from(sa.text("resources"))).all() +# for rid, g_old in resources: +# sess.execute(sa.text("UPDATE resources SET gateway_id_new=(SELECT id_new FROM gateways WHERE id=:g) WHERE id=:i"), {"g": g_old, "i": rid}) +# prompts = sess.execute(sa.select(sa.text("id, gateway_id")).select_from(sa.text("prompts"))).all() +# for pid, g_old in prompts: +# sess.execute(sa.text("UPDATE prompts SET gateway_id_new=(SELECT id_new FROM gateways WHERE id=:g) WHERE id=:i"), {"g": g_old, "i": pid}) +# sta = sess.execute(sa.select(sa.text("server_id, tool_id")).select_from(sa.text("server_tool_association"))).all() +# for s_old, t_old in sta: +# sess.execute(sa.text("UPDATE server_tool_association SET server_id_new=(SELECT id_new FROM servers WHERE id=:s), tool_id_new=(SELECT id_new FROM tools WHERE id=:t) WHERE server_id=:s AND tool_id=:t"), {"s": s_old, "t": t_old}) +# tool_metrics = sess.execute(sa.select(sa.text("id, tool_id")).select_from(sa.text("tool_metrics"))).all() +# for tmid, t_old in tool_metrics: +# 
sess.execute(sa.text("UPDATE tool_metrics SET tool_id_new=(SELECT id_new FROM tools WHERE id=:t) WHERE id=:i"), {"t": t_old, "i": tmid}) +# server_metrics = sess.execute(sa.select(sa.text("id, server_id")).select_from(sa.text("server_metrics"))).all() +# for smid, s_old in server_metrics: +# sess.execute(sa.text("UPDATE server_metrics SET server_id_new=(SELECT id_new FROM servers WHERE id=:s) WHERE id=:i"), {"s": s_old, "i": smid}) +# server_resource_assoc = sess.execute(sa.select(sa.text("server_id, resource_id")).select_from(sa.text("server_resource_association"))).all() +# for s_old, r_id in server_resource_assoc: +# sess.execute(sa.text("UPDATE server_resource_association SET server_id_new=(SELECT id_new FROM servers WHERE id=:s) WHERE server_id=:s AND resource_id=:r"), {"s": s_old, "r": r_id}) +# server_prompt_assoc = sess.execute(sa.select(sa.text("server_id, prompt_id")).select_from(sa.text("server_prompt_association"))).all() +# for s_old, p_id in server_prompt_assoc: +# sess.execute(sa.text("UPDATE server_prompt_association SET server_id_new=(SELECT id_new FROM servers WHERE id=:s) WHERE server_id=:s AND prompt_id=:p"), {"s": s_old, "p": p_id}) + +# sess.commit() + +# # ── STAGE 3: FINALIZE SCHEMA (CORRECTED ORDER) ─────────────────────── +# with op.batch_alter_table("server_tool_association") as batch_op: +# batch_op.drop_column("server_id") +# batch_op.drop_column("tool_id") +# batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) +# batch_op.alter_column("tool_id_new", new_column_name="tool_id", nullable=False) +# batch_op.create_primary_key("pk_server_tool_association", ["server_id", "tool_id"]) + +# with op.batch_alter_table("server_resource_association") as batch_op: +# batch_op.drop_column("server_id") +# batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) + +# with op.batch_alter_table("server_prompt_association") as batch_op: +# batch_op.drop_column("server_id") +# 
batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) + +# with op.batch_alter_table("server_metrics") as batch_op: +# batch_op.drop_column("server_id") +# batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) + +# with op.batch_alter_table("tool_metrics") as batch_op: +# batch_op.drop_column("tool_id") +# batch_op.alter_column("tool_id_new", new_column_name="tool_id", nullable=False) + +# with op.batch_alter_table("tools") as batch_op: +# batch_op.drop_column("id") +# batch_op.alter_column("id_new", new_column_name="id", nullable=False) +# batch_op.create_primary_key("pk_tools", ["id"]) +# batch_op.drop_column("gateway_id") +# batch_op.alter_column("gateway_id_new", new_column_name="gateway_id", nullable=True) +# batch_op.drop_column("name") +# batch_op.alter_column("name_new", new_column_name="name", nullable=False) +# batch_op.alter_column("original_name", nullable=False) +# batch_op.alter_column("original_name_slug", nullable=False) +# batch_op.create_unique_constraint("uq_tools_name", ["name"]) +# batch_op.create_unique_constraint("uq_gateway_id__original_name", ["gateway_id", "original_name"]) + +# with op.batch_alter_table("resources") as batch_op: +# batch_op.drop_column("gateway_id") +# batch_op.alter_column("gateway_id_new", new_column_name="gateway_id", nullable=True) + +# with op.batch_alter_table("prompts") as batch_op: +# batch_op.drop_column("gateway_id") +# batch_op.alter_column("gateway_id_new", new_column_name="gateway_id", nullable=True) + +# with op.batch_alter_table("gateways") as batch_op: +# batch_op.drop_column("id") +# batch_op.alter_column("id_new", new_column_name="id", nullable=False) +# batch_op.create_primary_key("pk_gateways", ["id"]) +# batch_op.alter_column("slug", nullable=False) +# batch_op.create_unique_constraint("uq_gateways_slug", ["slug"]) +# batch_op.create_unique_constraint("uq_gateways_url", ["url"]) + +# with op.batch_alter_table("servers") as batch_op: +# 
def downgrade() -> None:
    """Revert the schema from UUID primary keys back to integer keys.

    The reversal runs in the opposite order of the upgrade and is
    intentionally DESTRUCTIVE:

    * Stage 1 drops the UUID-era constraints, renames the UUID columns back
      to their temporary ``*_new`` names, and re-adds the old integer
      columns (empty / NULL — original integer IDs cannot be derived from
      the UUIDs).
    * Stage 2 (data restoration) is a deliberate no-op for the same reason:
      relationships cannot be reconstructed from the remaining data.
    * Stage 3 drops every temporary UUID/slug column, leaving the original
      schema shape but no usable data.

    Warning:
        Only use this to restore the schema structure. All affected rows
        must be re-loaded from backups afterwards.
    """
    # ── STAGE 1 (REVERSE): undo the upgrade's final schema changes ───────
    # Data held in the UUID columns is discarded here by design.
    with op.batch_alter_table("server_tool_association") as batch:
        # Constraints first — they reference the columns being renamed.
        batch.drop_constraint("fk_server_tool_association_tools", type_="foreignkey")
        batch.drop_constraint("fk_server_tool_association_servers", type_="foreignkey")
        batch.drop_constraint("pk_server_tool_association", type_="primarykey")
        # Move the UUID columns out of the way, then restore empty int columns.
        batch.alter_column("server_id", new_column_name="server_id_new")
        batch.alter_column("tool_id", new_column_name="tool_id_new")
        batch.add_column(sa.Column("server_id", sa.Integer(), nullable=True))
        batch.add_column(sa.Column("tool_id", sa.Integer(), nullable=True))

    with op.batch_alter_table("tools") as batch:
        batch.drop_constraint("fk_tools_gateway_id", type_="foreignkey")
        batch.drop_constraint("uq_gateway_id__original_name", type_="unique")
        batch.drop_constraint("uq_tools_name", type_="unique")
        batch.drop_constraint("pk_tools", type_="primarykey")
        batch.alter_column("id", new_column_name="id_new")
        batch.alter_column("gateway_id", new_column_name="gateway_id_new")
        batch.alter_column("name", new_column_name="name_new")
        batch.add_column(sa.Column("id", sa.Integer(), nullable=True))
        batch.add_column(sa.Column("gateway_id", sa.Integer(), nullable=True))
        batch.add_column(sa.Column("name", sa.String(), nullable=True))

    with op.batch_alter_table("servers") as batch:
        batch.drop_constraint("pk_servers", type_="primarykey")
        batch.alter_column("id", new_column_name="id_new")
        batch.add_column(sa.Column("id", sa.Integer(), nullable=True))

    with op.batch_alter_table("gateways") as batch:
        batch.drop_constraint("uq_gateways_url", type_="unique")
        batch.drop_constraint("uq_gateways_slug", type_="unique")
        batch.drop_constraint("pk_gateways", type_="primarykey")
        batch.alter_column("id", new_column_name="id_new")
        batch.add_column(sa.Column("id", sa.Integer(), nullable=True))

    # ── STAGE 2 (REVERSE): data restoration — intentionally skipped ──────
    # Integer PKs cannot be recovered from UUIDs; the re-added columns
    # above remain NULL.

    # ── STAGE 3 (REVERSE): drop all temporary/UUID columns ───────────────
    op.drop_column("server_tool_association", "tool_id_new")
    op.drop_column("server_tool_association", "server_id_new")
    op.drop_column("servers", "id_new")
    op.drop_column("tools", "gateway_id_new")
    op.drop_column("tools", "name_new")
    op.drop_column("tools", "original_name_slug")
    op.drop_column("tools", "original_name")
    op.drop_column("tools", "id_new")
    op.drop_column("gateways", "id_new")
    op.drop_column("gateways", "slug")
Skipping migration.") return - op.drop_column("tools", "annotations") + op.drop_column("tools", "annotations") \ No newline at end of file diff --git a/mcpgateway/alembic/versions/e75490e949b1_add_improved_status_to_tables.py b/alembic/versions/e75490e949b1_add_improved_status_to_tables.py similarity index 97% rename from mcpgateway/alembic/versions/e75490e949b1_add_improved_status_to_tables.py rename to alembic/versions/e75490e949b1_add_improved_status_to_tables.py index 097535b4..06c63ef9 100644 --- a/mcpgateway/alembic/versions/e75490e949b1_add_improved_status_to_tables.py +++ b/alembic/versions/e75490e949b1_add_improved_status_to_tables.py @@ -41,4 +41,4 @@ def downgrade(): op.drop_column("tools", "reachable") op.alter_column("gateways", "enabled", new_column_name="is_active") - op.drop_column("gateways", "reachable") + op.drop_column("gateways", "reachable") \ No newline at end of file diff --git a/mcpgateway/admin.py b/mcpgateway/admin.py index 8cc8a360..78aa24c4 100644 --- a/mcpgateway/admin.py +++ b/mcpgateway/admin.py @@ -3975,118 +3975,59 @@ async def admin_delete_root(uri: str, request: Request, user: str = Depends(requ MetricsDict = Dict[str, Union[ToolMetrics, ResourceMetrics, ServerMetrics, PromptMetrics]] -@admin_router.get("/metrics", response_model=MetricsDict) -async def admin_get_metrics( - db: Session = Depends(get_db), - user: str = Depends(require_auth), -) -> MetricsDict: - """ - Retrieve aggregate metrics for all entity types via the admin UI. - - This endpoint collects and returns usage metrics for tools, resources, servers, - and prompts. The metrics are retrieved by calling the aggregate_metrics method - on each respective service, which compiles statistics about usage patterns, - success rates, and other relevant metrics for administrative monitoring - and analysis purposes. - - Args: - db (Session): Database session dependency. - user (str): Authenticated user dependency. 
- - Returns: - MetricsDict: A dictionary containing the aggregated metrics for tools, - resources, servers, and prompts. Each value is a Pydantic model instance - specific to the entity type. - - Examples: - >>> import asyncio - >>> from unittest.mock import AsyncMock, MagicMock - >>> from mcpgateway.schemas import ToolMetrics, ResourceMetrics, ServerMetrics, PromptMetrics - >>> - >>> mock_db = MagicMock() - >>> mock_user = "test_user" - >>> - >>> mock_tool_metrics = ToolMetrics( - ... total_executions=10, - ... successful_executions=9, - ... failed_executions=1, - ... failure_rate=0.1, - ... min_response_time=0.05, - ... max_response_time=1.0, - ... avg_response_time=0.3, - ... last_execution_time=None - ... ) - >>> mock_resource_metrics = ResourceMetrics( - ... total_executions=5, - ... successful_executions=5, - ... failed_executions=0, - ... failure_rate=0.0, - ... min_response_time=0.1, - ... max_response_time=0.5, - ... avg_response_time=0.2, - ... last_execution_time=None - ... ) - >>> mock_server_metrics = ServerMetrics( - ... total_executions=7, - ... successful_executions=7, - ... failed_executions=0, - ... failure_rate=0.0, - ... min_response_time=0.2, - ... max_response_time=0.7, - ... avg_response_time=0.4, - ... last_execution_time=None - ... ) - >>> mock_prompt_metrics = PromptMetrics( - ... total_executions=3, - ... successful_executions=3, - ... failed_executions=0, - ... failure_rate=0.0, - ... min_response_time=0.15, - ... max_response_time=0.6, - ... avg_response_time=0.35, - ... last_execution_time=None - ... 
) - >>> - >>> original_aggregate_metrics_tool = tool_service.aggregate_metrics - >>> original_aggregate_metrics_resource = resource_service.aggregate_metrics - >>> original_aggregate_metrics_server = server_service.aggregate_metrics - >>> original_aggregate_metrics_prompt = prompt_service.aggregate_metrics - >>> - >>> tool_service.aggregate_metrics = AsyncMock(return_value=mock_tool_metrics) - >>> resource_service.aggregate_metrics = AsyncMock(return_value=mock_resource_metrics) - >>> server_service.aggregate_metrics = AsyncMock(return_value=mock_server_metrics) - >>> prompt_service.aggregate_metrics = AsyncMock(return_value=mock_prompt_metrics) - >>> - >>> async def test_admin_get_metrics(): - ... result = await admin_get_metrics(mock_db, mock_user) - ... return ( - ... isinstance(result, dict) and - ... result.get("tools") == mock_tool_metrics and - ... result.get("resources") == mock_resource_metrics and - ... result.get("servers") == mock_server_metrics and - ... result.get("prompts") == mock_prompt_metrics - ... 
) - >>> - >>> import asyncio; asyncio.run(test_admin_get_metrics()) - True - >>> - >>> tool_service.aggregate_metrics = original_aggregate_metrics_tool - >>> resource_service.aggregate_metrics = original_aggregate_metrics_resource - >>> server_service.aggregate_metrics = original_aggregate_metrics_server - >>> prompt_service.aggregate_metrics = original_aggregate_metrics_prompt - """ - logger.debug(f"User {user} requested aggregate metrics") - tool_metrics = await tool_service.aggregate_metrics(db) - resource_metrics = await resource_service.aggregate_metrics(db) - server_metrics = await server_service.aggregate_metrics(db) - prompt_metrics = await prompt_service.aggregate_metrics(db) - - return { - "tools": tool_metrics, - "resources": resource_metrics, - "servers": server_metrics, - "prompts": prompt_metrics, +# @admin_router.get("/metrics", response_model=MetricsDict) +# async def admin_get_metrics( +# db: Session = Depends(get_db), +# user: str = Depends(require_auth), +# ) -> MetricsDict: +# """ +# Retrieve aggregate metrics for all entity types via the admin UI. + +# This endpoint collects and returns usage metrics for tools, resources, servers, +# and prompts. The metrics are retrieved by calling the aggregate_metrics method +# on each respective service, which compiles statistics about usage patterns, +# success rates, and other relevant metrics for administrative monitoring +# and analysis purposes. + +# Args: +# db (Session): Database session dependency. +# user (str): Authenticated user dependency. + +# Returns: +# MetricsDict: A dictionary containing the aggregated metrics for tools, +# resources, servers, and prompts. Each value is a Pydantic model instance +# specific to the entity type. 
+# """ +# logger.debug(f"User {user} requested aggregate metrics") +# tool_metrics = await tool_service.aggregate_metrics(db) +# resource_metrics = await resource_service.aggregate_metrics(db) +# server_metrics = await server_service.aggregate_metrics(db) +# prompt_metrics = await prompt_service.aggregate_metrics(db) + +# # Return actual Pydantic model instances +# return { +# "tools": tool_metrics, +# "resources": resource_metrics, +# "servers": server_metrics, +# "prompts": prompt_metrics, +# } + +@admin_router.get("/metrics") +async def get_aggregated_metrics(db: Session = Depends(get_db)) -> Dict[str, Any]: + metrics = { + "tools": await tool_service.aggregate_metrics(db), + "resources": await resource_service.aggregate_metrics(db), + "prompts": await prompt_service.aggregate_metrics(db), + "servers": await server_service.aggregate_metrics(db), + "topPerformers": { + "tools": await tool_service.get_top_tools(db, limit=5), + "resources": await resource_service.get_top_resources(db, limit=5), + "prompts": await prompt_service.get_top_prompts(db, limit=5), + "servers": await server_service.get_top_servers(db, limit=5) + } + } } + return metrics @admin_router.post("/metrics/reset", response_model=Dict[str, object]) diff --git a/mcpgateway/handlers/sampling.py b/mcpgateway/handlers/sampling.py index a8e805ff..bcda47d9 100644 --- a/mcpgateway/handlers/sampling.py +++ b/mcpgateway/handlers/sampling.py @@ -221,7 +221,7 @@ async def create_message(self, db: Session, request: Dict[str, Any]) -> CreateMe # FIXME: Implement actual model sampling - currently returns mock response # For now return mock response response = self._mock_sample(messages=messages) - + # Convert to result return CreateMessageResult( content=TextContent(type="text", text=response), diff --git a/mcpgateway/migrations/env.py b/mcpgateway/migrations/env.py new file mode 100644 index 00000000..36112a3c --- /dev/null +++ b/mcpgateway/migrations/env.py @@ -0,0 +1,78 @@ +from logging.config import 
def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode.

    Configures the Alembic context with just a database URL instead of a
    live Engine, so no DBAPI needs to be importable. ``context.execute()``
    calls emit SQL text to the script output rather than a connection.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )

    with context.begin_transaction():
        context.run_migrations()
class TopPerformer(BaseModelWithConfigDict):
    """Aggregated usage statistics for one top-performing entity.

    Returned by the per-service ``get_top_*`` queries and surfaced under
    the ``topPerformers`` key of the admin ``/metrics`` endpoint. One
    instance summarizes a single tool, resource, prompt, or server.
    """

    # id is a Union because servers/gateways use UUID strings while some
    # entities still carry integer primary keys.
    id: Union[str, int] = Field(..., description="Entity ID")
    name: str = Field(..., description="Entity name")
    execution_count: int = Field(..., description="Number of executions")
    # The following are None when the entity has no recorded metrics yet.
    avg_response_time: Optional[float] = Field(None, description="Average response time in seconds")
    success_rate: Optional[float] = Field(None, description="Success rate percentage")
    last_execution: Optional[datetime] = Field(None, description="Timestamp of last execution")
diff --git a/mcpgateway/services/prompt_service.py b/mcpgateway/services/prompt_service.py index 8094ddd6..0f1ced24 100644 --- a/mcpgateway/services/prompt_service.py +++ b/mcpgateway/services/prompt_service.py @@ -23,7 +23,7 @@ # Third-Party from jinja2 import Environment, meta, select_autoescape -from sqlalchemy import delete, func, not_, select +from sqlalchemy import delete, func, not_, select, desc, case from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -33,7 +33,7 @@ from mcpgateway.db import PromptMetric, server_prompt_association from mcpgateway.models import Message, PromptResult, Role, TextContent from mcpgateway.plugins import GlobalContext, PluginManager, PluginViolationError, PromptPosthookPayload, PromptPrehookPayload -from mcpgateway.schemas import PromptCreate, PromptRead, PromptUpdate +from mcpgateway.schemas import PromptCreate, PromptRead, PromptUpdate, TopPerformer from mcpgateway.services.logging_service import LoggingService # Initialize logging service first @@ -139,6 +139,34 @@ async def shutdown(self) -> None: self._event_subscribers.clear() logger.info("Prompt service shutdown complete") + + async def get_top_prompts(self, db: Session, limit: int = 5) -> List[TopPerformer]: + results = db.query( + DbPrompt.id, + DbPrompt.name, + func.count(PromptMetric.id).label('execution_count'), + func.avg(PromptMetric.response_time).label('avg_response_time'), + (func.sum(case((PromptMetric.is_success, 1), else_=0)) / func.count(PromptMetric.id) * 100).label('success_rate'), + func.max(PromptMetric.timestamp).label('last_execution') + ).outerjoin( + PromptMetric + ).group_by( + DbPrompt.id, DbPrompt.name + ).order_by( + desc('execution_count') + ).limit(limit).all() + + return [ + TopPerformer( + id=result.id, + name=result.name, + execution_count=result.execution_count or 0, + avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, + success_rate=float(result.success_rate) if 
result.success_rate else None, + last_execution=result.last_execution + ) for result in results + ] + def _convert_db_prompt(self, db_prompt: DbPrompt) -> Dict[str, Any]: """ Convert a DbPrompt instance to a dictionary matching the PromptRead schema, diff --git a/mcpgateway/services/resource_service.py b/mcpgateway/services/resource_service.py index 1030b74d..c40c6e0b 100644 --- a/mcpgateway/services/resource_service.py +++ b/mcpgateway/services/resource_service.py @@ -33,7 +33,7 @@ # Third-Party import parse -from sqlalchemy import delete, func, not_, select +from sqlalchemy import delete, func, not_, select, desc, case from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -49,6 +49,7 @@ ResourceRead, ResourceSubscription, ResourceUpdate, + TopPerformer ) from mcpgateway.services.logging_service import LoggingService @@ -117,6 +118,33 @@ async def shutdown(self) -> None: # Clear subscriptions self._event_subscribers.clear() logger.info("Resource service shutdown complete") + + async def get_top_resources(self, db: Session, limit: int = 5) -> List[TopPerformer]: + results = db.query( + DbResource.id, + DbResource.uri.label('name'), # Using URI as the name field for TopPerformer + func.count(ResourceMetric.id).label('execution_count'), + func.avg(ResourceMetric.response_time).label('avg_response_time'), + (func.sum(case((ResourceMetric.is_success , 1), else_=0)) / func.count(ResourceMetric.id) * 100).label('success_rate'), + func.max(ResourceMetric.timestamp).label('last_execution') + ).outerjoin( + ResourceMetric + ).group_by( + DbResource.id, DbResource.uri + ).order_by( + desc('execution_count') + ).limit(limit).all() + + return [ + TopPerformer( + id=result.id, + name=result.name, + execution_count=result.execution_count or 0, + avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, + success_rate=float(result.success_rate) if result.success_rate else None, + last_execution=result.last_execution + ) for 
result in results + ] def _convert_resource_to_read(self, resource: DbResource) -> ResourceRead: """ diff --git a/mcpgateway/services/server_service.py b/mcpgateway/services/server_service.py index 96b7c37a..87d30a05 100644 --- a/mcpgateway/services/server_service.py +++ b/mcpgateway/services/server_service.py @@ -19,7 +19,7 @@ # Third-Party import httpx -from sqlalchemy import delete, func, not_, select +from sqlalchemy import delete, func, not_, select, case, desc from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -30,7 +30,7 @@ from mcpgateway.db import Server as DbServer from mcpgateway.db import ServerMetric from mcpgateway.db import Tool as DbTool -from mcpgateway.schemas import ServerCreate, ServerMetrics, ServerRead, ServerUpdate +from mcpgateway.schemas import ServerCreate, ServerMetrics, ServerRead, ServerUpdate, TopPerformer from mcpgateway.services.logging_service import LoggingService # Initialize logging service first @@ -126,6 +126,35 @@ async def shutdown(self) -> None: await self._http_client.aclose() logger.info("Server service shutdown complete") + +# get_top_server + async def get_top_servers(self, db: Session, limit: int = 5) -> List[TopPerformer]: + results = db.query( + DbServer.id, + DbServer.name, + func.count(ServerMetric.id).label('execution_count'), + func.avg(ServerMetric.response_time).label('avg_response_time'), + (func.sum(case((ServerMetric.is_success, 1), else_=0)) / func.count(ServerMetric.id) * 100).label('success_rate'), + func.max(ServerMetric.timestamp).label('last_execution') # Using timestamp instead of created_at + ).outerjoin( + ServerMetric + ).group_by( + DbServer.id, DbServer.name + ).order_by( + desc('execution_count') + ).limit(limit).all() + + return [ + TopPerformer( + id=result.id, + name=result.name, + execution_count=result.execution_count or 0, + avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, + success_rate=float(result.success_rate) if 
result.success_rate else None, + last_execution=result.last_execution + ) for result in results + ] + def _convert_server_to_read(self, server: DbServer) -> ServerRead: """ Converts a DbServer instance into a ServerRead model, including aggregated metrics. diff --git a/mcpgateway/services/tool_service.py b/mcpgateway/services/tool_service.py index 55c94727..258b1e10 100644 --- a/mcpgateway/services/tool_service.py +++ b/mcpgateway/services/tool_service.py @@ -28,7 +28,7 @@ from mcp import ClientSession from mcp.client.sse import sse_client from mcp.client.streamable_http import streamablehttp_client -from sqlalchemy import delete, func, not_, select +from sqlalchemy import delete, func, not_, select, case, desc from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -45,6 +45,7 @@ ToolCreate, ToolRead, ToolUpdate, + TopPerformer ) from mcpgateway.services.logging_service import LoggingService from mcpgateway.utils.create_slug import slugify @@ -193,6 +194,34 @@ async def shutdown(self) -> None: await self._http_client.aclose() logger.info("Tool service shutdown complete") + + async def get_top_tools(self, db: Session, limit: int = 5) -> List[TopPerformer]: + results = db.query( + DbTool.id, + DbTool.name, + func.count(ToolMetric.id).label('execution_count'), + func.avg(ToolMetric.response_time).label('avg_response_time'), + (func.sum(case((ToolMetric.is_success , 1), else_=0)) / func.count(ToolMetric.id) * 100).label('success_rate'), + func.max(ToolMetric.timestamp).label('last_execution') + ).outerjoin( + ToolMetric + ).group_by( + DbTool.id, DbTool.name + ).order_by( + desc('execution_count') + ).limit(limit).all() + + return [ + TopPerformer( + id=result.id, + name=result.name, + execution_count=result.execution_count or 0, + avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, + success_rate=float(result.success_rate) if result.success_rate else None, + last_execution=result.last_execution + ) for 
result in results + ] + def _convert_tool_to_read(self, tool: DbTool) -> ToolRead: """Converts a DbTool instance into a ToolRead model, including aggregated metrics and new API gateway fields: request_type and authentication credentials (masked). @@ -334,10 +363,12 @@ async def register_tool(self, db: Session, tool: ToolCreate) -> ToolRead: logger.info(f"Registered tool: {db_tool.name}") return self._convert_tool_to_read(db_tool) except IntegrityError as ie: + db.rollback() logger.error(f"IntegrityError during tool registration: {ie}") - raise ie - except Exception as ex: - raise ToolError(f"Failed to register tool: {str(ex)}") + raise ToolError(f"Tool already exists: {tool.name}") + except Exception as e: + db.rollback() + raise ToolError(f"Failed to register tool: {str(e)}") async def list_tools( self, db: Session, include_inactive: bool = False, cursor: Optional[str] = None, tags: Optional[List[str]] = None, _request_headers: Optional[Dict[str, str]] = None diff --git a/mcpgateway/static/admin.js b/mcpgateway/static/admin.js index 18f4d24b..2a1d6af9 100644 --- a/mcpgateway/static/admin.js +++ b/mcpgateway/static/admin.js @@ -842,6 +842,7 @@ function showMetricsPlaceholder() { // ENHANCED METRICS DISPLAY with Complete System Overview // =================================================================== + function displayMetrics(data) { const metricsPanel = safeGetElement("metrics-panel"); if (!metricsPanel) { @@ -890,7 +891,9 @@ function displayMetrics(data) { // Top Performers section (before individual metrics) if (data.topPerformers || data.top) { const topData = data.topPerformers || data.top; - const topSection = createTopPerformersSection(topData); + // const topSection = createTopPerformersSection(topData); + const topSection = createEnhancedTopPerformersSection(topData); + mainContainer.appendChild(topSection); } @@ -1198,52 +1201,354 @@ function extractKPIData(data) { /** * SECURITY: Create top performers section with safe display */ -function 
createTopPerformersSection(topData) { +// function createTopPerformersSection(topData) { +// try { +// const section = document.createElement("div"); +// section.className = "bg-white rounded-lg shadow p-6 dark:bg-gray-800"; + +// const title = document.createElement("h3"); +// title.className = "text-lg font-medium mb-4 dark:text-gray-200"; +// title.textContent = "Top Performers"; +// section.appendChild(title); + +// const grid = document.createElement("div"); +// grid.className = "grid grid-cols-1 md:grid-cols-2 gap-4"; + +// // Top Tools +// if (topData.tools && Array.isArray(topData.tools)) { +// const toolsCard = createTopItemCard("Tools", topData.tools); +// grid.appendChild(toolsCard); +// } + +// // Top Resources +// if (topData.resources && Array.isArray(topData.resources)) { +// const resourcesCard = createTopItemCard( +// "Resources", +// topData.resources, +// ); +// grid.appendChild(resourcesCard); +// } + +// // Top Prompts +// if (topData.prompts && Array.isArray(topData.prompts)) { +// const promptsCard = createTopItemCard("Prompts", topData.prompts); +// grid.appendChild(promptsCard); +// } + +// // Top Servers +// if (topData.servers && Array.isArray(topData.servers)) { +// const serversCard = createTopItemCard("Servers", topData.servers); +// grid.appendChild(serversCard); +// } + +// section.appendChild(grid); +// return section; +// } catch (error) { +// console.error("Error creating top performers section:", error); +// return document.createElement("div"); // Safe fallback +// } +// } +function createEnhancedTopPerformersSection(topData) { try { - const section = document.createElement("div"); - section.className = "bg-white rounded-lg shadow p-6 dark:bg-gray-800"; - - const title = document.createElement("h3"); - title.className = "text-lg font-medium mb-4 dark:text-gray-200"; - title.textContent = "Top Performers"; + const section = document.createElement('div'); + section.className = 'bg-white rounded-lg shadow p-6 dark:bg-gray-800'; + + 
const title = document.createElement('h3'); + title.className = 'text-lg font-medium mb-4 dark:text-gray-200'; + title.textContent = 'Top Performers'; + title.setAttribute('aria-label', 'Top Performers Section'); section.appendChild(title); + + // Loading skeleton + const skeleton = document.createElement('div'); + skeleton.className = 'animate-pulse space-y-4'; + skeleton.innerHTML = ` +
+
+
+
+
`; + section.appendChild(skeleton); + + // Tabs + const tabsContainer = document.createElement('div'); + tabsContainer.className = 'border-b border-gray-200 dark:border-gray-700'; + const tabList = document.createElement('nav'); + tabList.className = '-mb-px flex space-x-8 overflow-x-auto'; + tabList.setAttribute('aria-label', 'Top Performers Tabs'); + + const entityTypes = ['tools', 'resources', 'prompts', 'gateways', 'servers']; + entityTypes.forEach((type, index) => { + if (topData[type] && Array.isArray(topData[type])) { + const tab = createTab(type, index === 0); + tabList.appendChild(tab); + } + }); + + tabsContainer.appendChild(tabList); + section.appendChild(tabsContainer); + + // Content panels + const contentContainer = document.createElement('div'); + contentContainer.className = 'mt-4'; + + entityTypes.forEach((type, index) => { + if (topData[type] && Array.isArray(topData[type])) { + const panel = createTopPerformersTable(type, topData[type], index === 0); + contentContainer.appendChild(panel); + } + }); + + section.appendChild(contentContainer); + + // Remove skeleton once data is loaded + setTimeout(() => skeleton.remove(), 500); // Simulate async data load + + // Export button + const exportButton = document.createElement('button'); + exportButton.className = 'mt-4 bg-indigo-600 text-white px-4 py-2 rounded hover:bg-indigo-700 dark:bg-indigo-500 dark:hover:bg-indigo-600'; + exportButton.textContent = 'Export Metrics'; + exportButton.onclick = () => exportMetricsToCSV(topData); + section.appendChild(exportButton); + + return section; + } catch (error) { + console.error('Error creating enhanced top performers section:', error); + showErrorMessage('Failed to load top performers section'); + return document.createElement('div'); + } +} - const grid = document.createElement("div"); - grid.className = "grid grid-cols-1 md:grid-cols-2 gap-4"; +function createTopPerformersTable(entityType, data, isActive) { + const panel = document.createElement('div'); + 
panel.id = `top-${entityType}-panel`; + panel.className = `transition-opacity duration-300 ${isActive ? 'opacity-100' : 'hidden opacity-0'}`; + panel.setAttribute('role', 'tabpanel'); + panel.setAttribute('aria-labelledby', `top-${entityType}-tab`); + + if (data.length === 0) { + const emptyState = document.createElement('p'); + emptyState.className = 'text-gray-500 dark:text-gray-400 text-center py-4'; + emptyState.textContent = `No ${entityType} data available`; + panel.appendChild(emptyState); + return panel; + } + + // Responsive table wrapper + const tableWrapper = document.createElement('div'); + tableWrapper.className = 'overflow-x-auto sm:overflow-x-visible'; + + const table = document.createElement('table'); + table.className = 'min-w-full divide-y divide-gray-200 dark:divide-gray-700'; + + // Table header + const thead = document.createElement('thead'); + thead.className = 'bg-gray-50 dark:bg-gray-700 hidden sm:table-header-group'; + const headerRow = document.createElement('tr'); + const headers = ['Rank', 'Name', 'Executions', 'Avg Response Time', 'Success Rate', 'Last Used']; + + headers.forEach((headerText, index) => { + const th = document.createElement('th'); + th.className = 'px-6 py-3 text-left text-xs font-medium text-gray-500 dark:text-gray-300 uppercase tracking-wider'; + th.setAttribute('scope', 'col'); + th.textContent = headerText; + if (index === 0) th.setAttribute('aria-sort', 'ascending'); + headerRow.appendChild(th); + }); + + thead.appendChild(headerRow); + table.appendChild(thead); + + // Table body + const tbody = document.createElement('tbody'); + tbody.className = 'bg-white dark:bg-gray-800 divide-y divide-gray-200 dark:divide-gray-700'; + + // Pagination (if > 5 items) + const paginatedData = data.slice(0, 5); // Limit to top 5 + paginatedData.forEach((item, index) => { + const row = document.createElement('tr'); + row.className = 'hover:bg-gray-50 dark:hover:bg-gray-700 transition-colors duration-200'; + + // Rank + const rankCell 
= document.createElement('td'); + rankCell.className = 'px-6 py-4 whitespace-nowrap text-sm font-medium text-gray-900 dark:text-gray-100 sm:px-6 sm:py-4'; + const rankBadge = document.createElement('span'); + rankBadge.className = `inline-flex items-center justify-center w-6 h-6 rounded-full ${ + index === 0 ? 'bg-yellow-400 text-yellow-900' : + index === 1 ? 'bg-gray-300 text-gray-900' : + index === 2 ? 'bg-orange-400 text-orange-900' : + 'bg-gray-100 text-gray-600' + }`; + rankBadge.textContent = index + 1; + rankBadge.setAttribute('aria-label', `Rank ${index + 1}`); + rankCell.appendChild(rankBadge); + row.appendChild(rankCell); + + // Name (clickable for drill-down) + const nameCell = document.createElement('td'); + nameCell.className = 'px-6 py-4 whitespace-nowrap text-sm text-indigo-600 dark:text-indigo-400 cursor-pointer'; + nameCell.textContent = escapeHtml(item.name || 'Unknown'); + nameCell.onclick = () => showDetailedMetrics(entityType, item.id); + nameCell.setAttribute('role', 'button'); + nameCell.setAttribute('aria-label', `View details for ${item.name || 'Unknown'}`); + row.appendChild(nameCell); + + // Executions + const execCell = document.createElement('td'); + execCell.className = 'px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4'; + execCell.textContent = formatNumber(item.execution_count || item.executions || 0); + row.appendChild(execCell); + + // Avg Response Time + const avgTimeCell = document.createElement('td'); + avgTimeCell.className = 'px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4'; + const avgTime = item.avg_response_time || item.avgResponseTime; + avgTimeCell.textContent = avgTime ? 
`${Math.round(avgTime)}ms` : 'N/A'; + row.appendChild(avgTimeCell); + + // Success Rate + const successCell = document.createElement('td'); + successCell.className = 'px-6 py-4 whitespace-nowrap text-sm sm:px-6 sm:py-4'; + const successRate = calculateSuccessRate(item); + const successBadge = document.createElement('span'); + successBadge.className = `inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium ${ + successRate >= 95 ? 'bg-green-100 text-green-800 dark:bg-green-800 dark:text-green-100' : + successRate >= 80 ? 'bg-yellow-100 text-yellow-800 dark:bg-yellow-800 dark:text-yellow-100' : + 'bg-red-100 text-red-800 dark:bg-red-800 dark:text-red-100' + }`; + successBadge.textContent = `${successRate}%`; + successBadge.setAttribute('aria-label', `Success rate: ${successRate}%`); + successCell.appendChild(successBadge); + row.appendChild(successCell); + + // Last Used + const lastUsedCell = document.createElement('td'); + lastUsedCell.className = 'px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4'; + lastUsedCell.textContent = formatLastUsed(item.last_execution || item.lastExecution); + row.appendChild(lastUsedCell); + + tbody.appendChild(row); + }); + + table.appendChild(tbody); + tableWrapper.appendChild(table); + panel.appendChild(tableWrapper); + + // Pagination controls (if needed) + if (data.length > 5) { + const pagination = createPaginationControls(data.length, 5, (page) => { + updateTableRows(panel, entityType, data, page); + }); + panel.appendChild(pagination); + } + + return panel; +} - // Top Tools - if (topData.tools && Array.isArray(topData.tools)) { - const toolsCard = createTopItemCard("Tools", topData.tools); - grid.appendChild(toolsCard); - } +function createTab(type, isActive) { + const tab = document.createElement('a'); + tab.href = '#'; + tab.id = `top-${type}-tab`; + tab.className = `${ + isActive + ? 
'border-indigo-500 text-indigo-600 dark:text-indigo-400' + : 'border-transparent text-gray-500 hover:text-gray-700 hover:border-gray-300 dark:text-gray-400 dark:hover:text-gray-300' + } whitespace-nowrap py-4 px-1 border-b-2 font-medium text-sm capitalize transition-colors duration-200 sm:py-4 sm:px-1`; + tab.textContent = type; + tab.setAttribute('role', 'tab'); + tab.setAttribute('aria-controls', `top-${type}-panel`); + tab.setAttribute('aria-selected', isActive.toString()); + tab.onclick = (e) => { + e.preventDefault(); + showTopPerformerTab(type); + }; + return tab; +} - // Top Resources - if (topData.resources && Array.isArray(topData.resources)) { - const resourcesCard = createTopItemCard( - "Resources", - topData.resources, - ); - grid.appendChild(resourcesCard); +function showTopPerformerTab(activeType) { + const entityTypes = ['tools', 'resources', 'prompts', 'gateways', 'servers']; + entityTypes.forEach(type => { + const panel = document.getElementById(`top-${type}-panel`); + const tab = document.getElementById(`top-${type}-tab`); + if (panel) { + panel.classList.toggle('hidden', type !== activeType); + panel.classList.toggle('opacity-100', type === activeType); + panel.classList.toggle('opacity-0', type !== activeType); + panel.setAttribute('aria-hidden', type !== activeType); + } + if (tab) { + tab.classList.toggle('border-indigo-500', type === activeType); + tab.classList.toggle('text-indigo-600', type === activeType); + tab.classList.toggle('dark:text-indigo-400', type === activeType); + tab.classList.toggle('border-transparent', type !== activeType); + tab.classList.toggle('text-gray-500', type !== activeType); + tab.setAttribute('aria-selected', type === activeType); } + }); +} - // Top Prompts - if (topData.prompts && Array.isArray(topData.prompts)) { - const promptsCard = createTopItemCard("Prompts", topData.prompts); - grid.appendChild(promptsCard); - } +function createPaginationControls(totalItems, itemsPerPage, onPageChange) { + const 
pagination = document.createElement('div'); + pagination.className = 'mt-4 flex justify-end space-x-2'; + const totalPages = Math.ceil(totalItems / itemsPerPage); + + for (let page = 1; page <= totalPages; page++) { + const button = document.createElement('button'); + button.className = `px-3 py-1 rounded ${page === 1 ? 'bg-indigo-600 text-white' : 'bg-gray-200 text-gray-700 dark:bg-gray-700 dark:text-gray-300'}`; + button.textContent = page; + button.onclick = () => { + onPageChange(page); + pagination.querySelectorAll('button').forEach(btn => { + btn.className = `px-3 py-1 rounded ${btn === button ? 'bg-indigo-600 text-white' : 'bg-gray-200 text-gray-700 dark:bg-gray-700 dark:text-gray-300'}`; + }); + }; + pagination.appendChild(button); + } + + return pagination; +} - // Top Servers - if (topData.servers && Array.isArray(topData.servers)) { - const serversCard = createTopItemCard("Servers", topData.servers); - grid.appendChild(serversCard); - } +function updateTableRows(panel, entityType, data, page) { + const tbody = panel.querySelector('tbody'); + tbody.innerHTML = ''; + const start = (page - 1) * 5; + const paginatedData = data.slice(start, start + 5); + + paginatedData.forEach((item, index) => { + const row = document.createElement('tr'); + // ... 
(same row creation logic as in createTopPerformersTable) + tbody.appendChild(row); + }); +} - section.appendChild(grid); - return section; - } catch (error) { - console.error("Error creating top performers section:", error); - return document.createElement("div"); // Safe fallback - } +function exportMetricsToCSV(topData) { + const headers = ['Entity Type', 'Rank', 'Name', 'Executions', 'Avg Response Time', 'Success Rate', 'Last Used']; + const rows = []; + + ['tools', 'resources', 'prompts', 'gateways', 'servers'].forEach(type => { + if (topData[type] && Array.isArray(topData[type])) { + topData[type].forEach((item, index) => { + rows.push([ + type, + index + 1, + `"${escapeHtml(item.name || 'Unknown')}"`, + formatNumber(item.execution_count || item.executions || 0), + item.avg_response_time || item.avgResponseTime ? `${Math.round(item.avg_response_time || item.avgResponseTime)}ms` : 'N/A', + `${calculateSuccessRate(item)}%`, + formatLastUsed(item.last_execution || item.lastExecution) + ]); + }); + } + }); + + const csv = [headers.join(','), ...rows.map(row => row.join(','))].join('\n'); + const blob = new Blob([csv], { type: 'text/csv' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `top_performers_${new Date().toISOString()}.csv`; + a.click(); + URL.revokeObjectURL(url); } /** From e17c6c37a92377c1b62ba8f830cac018912190ff Mon Sep 17 00:00:00 2001 From: IrushaBasukala Date: Mon, 4 Aug 2025 12:53:42 +0545 Subject: [PATCH 02/11] fixing error Signed-off-by: IrushaBasukala --- MANIFEST.in | 1 + mcpgateway/admin.py | 24 +++- mcpgateway/alembic/README.md | 172 ++++++++++++++++++++++++ mcpgateway/alembic/script.py.mako | 28 ++++ mcpgateway/handlers/sampling.py | 2 +- mcpgateway/migrations/env.py | 6 + mcpgateway/schemas.py | 13 ++ mcpgateway/services/gateway_service.py | 3 - mcpgateway/services/prompt_service.py | 54 +++++--- mcpgateway/services/resource_service.py | 55 +++++--- 
mcpgateway/services/server_service.py | 56 +++++--- mcpgateway/services/tool_service.py | 56 +++++--- tests/unit/mcpgateway/test_admin.py | 8 +- 13 files changed, 400 insertions(+), 78 deletions(-) create mode 100644 mcpgateway/alembic/README.md create mode 100644 mcpgateway/alembic/script.py.mako diff --git a/MANIFEST.in b/MANIFEST.in index 95f430a1..8bf2ca97 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -13,6 +13,7 @@ include Containerfile.lite include __init__ include alembic.ini include tox.ini +include alembic/README # 2️⃣ Top-level config, examples and helper scripts include *.py diff --git a/mcpgateway/admin.py b/mcpgateway/admin.py index 78aa24c4..0d88a4a1 100644 --- a/mcpgateway/admin.py +++ b/mcpgateway/admin.py @@ -4012,8 +4012,28 @@ async def admin_delete_root(uri: str, request: Request, user: str = Depends(requ # "prompts": prompt_metrics, # } + @admin_router.get("/metrics") async def get_aggregated_metrics(db: Session = Depends(get_db)) -> Dict[str, Any]: + """Retrieve aggregated metrics and top performers for all entity types. + + This endpoint collects usage metrics and top-performing entities for tools, + resources, prompts, and servers by calling the respective service methods. + The results are compiled into a dictionary for administrative monitoring. + + Args: + db (Session): Database session dependency for querying metrics. + + Returns: + Dict[str, Any]: A dictionary containing aggregated metrics and top performers + for tools, resources, prompts, and servers. The structure includes: + - 'tools': Metrics for tools. + - 'resources': Metrics for resources. + - 'prompts': Metrics for prompts. + - 'servers': Metrics for servers. + - 'topPerformers': A nested dictionary with top 5 tools, resources, prompts, + and servers. 
+ """ metrics = { "tools": await tool_service.aggregate_metrics(db), "resources": await resource_service.aggregate_metrics(db), @@ -4023,8 +4043,8 @@ async def get_aggregated_metrics(db: Session = Depends(get_db)) -> Dict[str, Any "tools": await tool_service.get_top_tools(db, limit=5), "resources": await resource_service.get_top_resources(db, limit=5), "prompts": await prompt_service.get_top_prompts(db, limit=5), - "servers": await server_service.get_top_servers(db, limit=5) - } + "servers": await server_service.get_top_servers(db, limit=5), + }, } } return metrics diff --git a/mcpgateway/alembic/README.md b/mcpgateway/alembic/README.md new file mode 100644 index 00000000..081ee38b --- /dev/null +++ b/mcpgateway/alembic/README.md @@ -0,0 +1,172 @@ +# Alembic Migration Guide for `mcpgateway` + +> Creating, applying, and managing schema migrations with Alembic. + +--- + +## Table of Contents + +1. [Why Alembic?](#why-alembic) +2. [Prerequisites](#prerequisites) +3. [Directory Layout](#directory-layout) +4. [Everyday Workflow](#everyday-workflow) +5. [Helpful Make Targets](#helpful-make-targets) +6. [Troubleshooting](#troubleshooting) +7. [Further Reading](#further-reading) + +--- + +## Why Alembic? + +- **Versioned DDL** - Revisions are timestamped, diff-able, and reversible. +- **Autogeneration** - Detects model vs. DB drift and writes `op.create_table`, `op.add_column`, etc. +- **Multi-DB Support** - Works with SQLite, PostgreSQL, MySQL-anything SQLAlchemy supports. +- **Zero Runtime Cost** - Only runs when you call it (dev, CI, deploy). + +--- + +## Prerequisites + +```bash +# Activate your virtual environment first +pip install --upgrade alembic +``` + +You do not need to set up `alembic.ini`, `env.py`, or metadata wiring - they're already configured. + +--- + +## Directory Layout + +``` +alembic.ini +alembic/ +├── env.py +├── script.py.mako +└── versions/ + ├── 20250626235501_initial_schema.py + └── ... 
+``` + +* `alembic.ini`: Configuration file +* `env.py`: Connects Alembic to your models and DB settings +* `script.py.mako`: Template for new revisions (keep this!) +* `versions/`: Contains all migration scripts + +--- + +## Everyday Workflow + +> **1 Edit → 2 Revision → 3 Upgrade** + +| Step | What you do | +| ------------------------ | ----------------------------------------------------------------------------- | +| **1. Change models** | Modify SQLAlchemy models in `mcpgateway.db` or its submodules. | +| **2. Generate revision** | Run: `MSG="add users table"` then `alembic revision --autogenerate -m "$MSG"` | +| **3. Review** | Open the new file in `alembic/versions/`. Verify the operations are correct. | +| **4. Upgrade DB** | Run: `alembic upgrade head` | +| **5. Commit** | Run: `git add alembic/versions/*.py` | + +### Other Common Commands + +```bash +alembic -c mcpgateway/alembic.ini current # Show current DB revision +alembic history --verbose # Show all migrations and their order +alembic downgrade -1 # Roll back one revision +alembic downgrade # Roll back to a specific revision hash +``` + +--- + +## ✅ Make Targets: Alembic Migration Commands + +These targets help you manage database schema migrations using Alembic. + +> You must have a valid `alembic/` setup and a working SQLAlchemy model base (`Base.metadata`). + +--- + +### 💡 List all available targets (with help) + +```bash +make help +``` + +This will include the Alembic section: + +``` +# 🛢️ Alembic tasks +db-new Autogenerate revision (MSG="title") +db-up Upgrade DB to head +db-down Downgrade one step (REV=-1 or hash) +db-current Show current DB revision +db-history List the migration graph +``` + +--- + +### 🔨 Commands + +| Command | Description | +| -------------------------- | ------------------------------------------------------ | +| `make db-new MSG="..."` | Generate a new migration based on model changes. | +| `make db-up` | Apply all unapplied migrations. 
| +| `make db-down` | Roll back the latest migration (`REV=-1` by default). | +| `make db-down REV=abc1234` | Roll back to a specific revision by hash. | +| `make db-current` | Print the current revision ID applied to the database. | +| `make db-history` | Show the full migration history and graph. | + +--- + +### 📌 Examples + +```bash +# Create a new migration with a custom message +make db-new MSG="add users table" + +# Apply it to the database +make db-up + +# Downgrade the last migration +make db-down + +# Downgrade to a specific revision +make db-down REV=cf1283d7fa92 + +# Show the current applied revision +make db-current + +# Show all migration history +make db-history +``` + +--- + +### 🛑 Notes + +* You must **edit models first** before `make db-new` generates anything useful. +* Always **review generated migration files** before committing. +* Don't forget to run `make db-up` on CI or deploy if using migrations to manage schema. + +--- + +## Troubleshooting + +| Symptom | Cause / Fix | +| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Empty migration (`pass`)** | Alembic couldn't detect models. Make sure all model classes are imported before `Base.metadata` is used (already handled in your `env.py`). | +| **`Can't locate revision ...`** | You deleted or renamed a revision file that the DB is pointing to. Either restore it or run `alembic stamp base` and recreate the revision. | +| **`script.py.mako` missing** | This file is required. Run `alembic init alembic` in a temp folder and copy the missing template into your project. | +| **SQLite foreign key limitations** | SQLite doesn't allow dropping constraints. Use `create table → copy → drop` flow manually, or plan around it. | +| **DB not updating** | Did you forget to run `alembic upgrade head`? Check with `alembic -c mcpgateway/alembic.ini current`. 
| +| **Wrong DB URL or config errors** | Confirm `settings.database_url` is valid. Check `env.py` and your `.env`/config settings. Alembic ignores `alembic.ini` for URLs in your setup. | +| **Model changes not detected** | Alembic only picks up declarative models in `Base.metadata`. Ensure all models are imported and not behind `if TYPE_CHECKING:` or other lazy imports. | + +--- + +## Further Reading + +* Official docs: [https://alembic.sqlalchemy.org](https://alembic.sqlalchemy.org) +* Autogenerate docs: [https://alembic.sqlalchemy.org/en/latest/autogenerate.html](https://alembic.sqlalchemy.org/en/latest/autogenerate.html) + +--- \ No newline at end of file diff --git a/mcpgateway/alembic/script.py.mako b/mcpgateway/alembic/script.py.mako new file mode 100644 index 00000000..fbf1fe34 --- /dev/null +++ b/mcpgateway/alembic/script.py.mako @@ -0,0 +1,28 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. 
+revision: str = ${repr(up_revision)} +down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + """Upgrade schema.""" + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + """Downgrade schema.""" + ${downgrades if downgrades else "pass"} \ No newline at end of file diff --git a/mcpgateway/handlers/sampling.py b/mcpgateway/handlers/sampling.py index bcda47d9..a8e805ff 100644 --- a/mcpgateway/handlers/sampling.py +++ b/mcpgateway/handlers/sampling.py @@ -221,7 +221,7 @@ async def create_message(self, db: Session, request: Dict[str, Any]) -> CreateMe # FIXME: Implement actual model sampling - currently returns mock response # For now return mock response response = self._mock_sample(messages=messages) - + # Convert to result return CreateMessageResult( content=TextContent(type="text", text=response), diff --git a/mcpgateway/migrations/env.py b/mcpgateway/migrations/env.py index 36112a3c..521caedb 100644 --- a/mcpgateway/migrations/env.py +++ b/mcpgateway/migrations/env.py @@ -1,3 +1,8 @@ +"""Alembic environment configuration for database migrations. + +This module sets up the Alembic migration environment, configuring the database connection +and metadata for running migrations in both online and offline modes. +""" from logging.config import fileConfig from sqlalchemy import engine_from_config @@ -73,6 +78,7 @@ def run_migrations_online() -> None: if context.is_offline_mode(): + run_migrations_offline() else: run_migrations_online() diff --git a/mcpgateway/schemas.py b/mcpgateway/schemas.py index 966d027c..6d01eca8 100644 --- a/mcpgateway/schemas.py +++ b/mcpgateway/schemas.py @@ -2939,6 +2939,19 @@ class TagInfo(BaseModelWithConfigDict): class TopPerformer(BaseModelWithConfigDict): + """Schema for representing top-performing entities with performance metrics. 
+ + Used to encapsulate metrics for entities such as prompts, resources, servers, or tools, + including execution count, average response time, success rate, and last execution timestamp. + + Attributes: + id (Union[str, int]): Unique identifier for the entity. + name (str): Name of the entity (e.g., prompt name, resource URI, server name, or tool name). + execution_count (int): Total number of executions for the entity. + avg_response_time (Optional[float]): Average response time in seconds, or None if no metrics. + success_rate (Optional[float]): Success rate percentage, or None if no metrics. + last_execution (Optional[datetime]): Timestamp of the last execution, or None if no metrics. + """ id: Union[str, int] = Field(..., description="Entity ID") name: str = Field(..., description="Entity name") execution_count: int = Field(..., description="Number of executions") diff --git a/mcpgateway/services/gateway_service.py b/mcpgateway/services/gateway_service.py index 73fa3227..d7bb4405 100644 --- a/mcpgateway/services/gateway_service.py +++ b/mcpgateway/services/gateway_service.py @@ -180,9 +180,6 @@ class GatewayService: - Active/inactive status management """ - - - def __init__(self) -> None: """Initialize the gateway service. 
diff --git a/mcpgateway/services/prompt_service.py b/mcpgateway/services/prompt_service.py index 0f1ced24..fe218735 100644 --- a/mcpgateway/services/prompt_service.py +++ b/mcpgateway/services/prompt_service.py @@ -139,22 +139,41 @@ async def shutdown(self) -> None: self._event_subscribers.clear() logger.info("Prompt service shutdown complete") - async def get_top_prompts(self, db: Session, limit: int = 5) -> List[TopPerformer]: - results = db.query( - DbPrompt.id, - DbPrompt.name, - func.count(PromptMetric.id).label('execution_count'), - func.avg(PromptMetric.response_time).label('avg_response_time'), - (func.sum(case((PromptMetric.is_success, 1), else_=0)) / func.count(PromptMetric.id) * 100).label('success_rate'), - func.max(PromptMetric.timestamp).label('last_execution') - ).outerjoin( - PromptMetric - ).group_by( - DbPrompt.id, DbPrompt.name - ).order_by( - desc('execution_count') - ).limit(limit).all() + """Retrieve the top-performing prompts based on execution count. + + Queries the database to get prompts with their metrics, ordered by the number of executions + in descending order. Returns a list of TopPerformer objects containing prompt details and + performance metrics. + + Args: + db (Session): Database session for querying prompt metrics. + limit (int, optional): Maximum number of prompts to return. Defaults to 5. + + Returns: + List[TopPerformer]: A list of TopPerformer objects, each containing: + - id: Prompt ID. + - name: Prompt name. + - execution_count: Total number of executions. + - avg_response_time: Average response time in seconds, or None if no metrics. + - success_rate: Success rate percentage, or None if no metrics. + - last_execution: Timestamp of the last execution, or None if no metrics. 
+ """ + results = ( + db.query( + DbPrompt.id, + DbPrompt.name, + func.count(PromptMetric.id).label("execution_count"),# pylint: disable=not-callable + func.avg(PromptMetric.response_time).label("avg_response_time"), + (func.sum(case((PromptMetric.is_success, 1), else_=0)) / func.count(PromptMetric.id) * 100).label("success_rate"), + func.max(PromptMetric.timestamp).label("last_execution"), + ) + .outerjoin(PromptMetric) + .group_by(DbPrompt.id, DbPrompt.name) + .order_by(desc("execution_count")) + .limit(limit) + .all() + ) return [ TopPerformer( @@ -163,8 +182,9 @@ async def get_top_prompts(self, db: Session, limit: int = 5) -> List[TopPerforme execution_count=result.execution_count or 0, avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, success_rate=float(result.success_rate) if result.success_rate else None, - last_execution=result.last_execution - ) for result in results + last_execution=result.last_execution, + ) + for result in results ] def _convert_db_prompt(self, db_prompt: DbPrompt) -> Dict[str, Any]: diff --git a/mcpgateway/services/resource_service.py b/mcpgateway/services/resource_service.py index c40c6e0b..e60c08f4 100644 --- a/mcpgateway/services/resource_service.py +++ b/mcpgateway/services/resource_service.py @@ -118,22 +118,42 @@ async def shutdown(self) -> None: # Clear subscriptions self._event_subscribers.clear() logger.info("Resource service shutdown complete") - + async def get_top_resources(self, db: Session, limit: int = 5) -> List[TopPerformer]: - results = db.query( - DbResource.id, - DbResource.uri.label('name'), # Using URI as the name field for TopPerformer - func.count(ResourceMetric.id).label('execution_count'), - func.avg(ResourceMetric.response_time).label('avg_response_time'), - (func.sum(case((ResourceMetric.is_success , 1), else_=0)) / func.count(ResourceMetric.id) * 100).label('success_rate'), - func.max(ResourceMetric.timestamp).label('last_execution') - ).outerjoin( - ResourceMetric - 
).group_by( - DbResource.id, DbResource.uri - ).order_by( - desc('execution_count') - ).limit(limit).all() + """Retrieve the top-performing resources based on execution count. + + Queries the database to get resources with their metrics, ordered by the number of executions + in descending order. Uses the resource URI as the name field for TopPerformer objects. + Returns a list of TopPerformer objects containing resource details and performance metrics. + + Args: + db (Session): Database session for querying resource metrics. + limit (int, optional): Maximum number of resources to return. Defaults to 5. + + Returns: + List[TopPerformer]: A list of TopPerformer objects, each containing: + - id: Resource ID. + - name: Resource URI (used as the name field). + - execution_count: Total number of executions. + - avg_response_time: Average response time in seconds, or None if no metrics. + - success_rate: Success rate percentage, or None if no metrics. + - last_execution: Timestamp of the last execution, or None if no metrics. 
+ """ + results = ( + db.query( + DbResource.id, + DbResource.uri.label("name"), # Using URI as the name field for TopPerformer + func.count(ResourceMetric.id).label("execution_count"),# pylint: disable=not-callable + func.avg(ResourceMetric.response_time).label("avg_response_time"), + (func.sum(case((ResourceMetric.is_success, 1), else_=0)) / func.count(ResourceMetric.id) * 100).label("success_rate"), + func.max(ResourceMetric.timestamp).label("last_execution"), + ) + .outerjoin(ResourceMetric) + .group_by(DbResource.id, DbResource.uri) + .order_by(desc("execution_count")) + .limit(limit) + .all() + ) return [ TopPerformer( @@ -142,8 +162,9 @@ async def get_top_resources(self, db: Session, limit: int = 5) -> List[TopPerfor execution_count=result.execution_count or 0, avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, success_rate=float(result.success_rate) if result.success_rate else None, - last_execution=result.last_execution - ) for result in results + last_execution=result.last_execution, + ) + for result in results ] def _convert_resource_to_read(self, resource: DbResource) -> ResourceRead: diff --git a/mcpgateway/services/server_service.py b/mcpgateway/services/server_service.py index 87d30a05..b6d8e798 100644 --- a/mcpgateway/services/server_service.py +++ b/mcpgateway/services/server_service.py @@ -126,23 +126,42 @@ async def shutdown(self) -> None: await self._http_client.aclose() logger.info("Server service shutdown complete") - -# get_top_server + # get_top_server async def get_top_servers(self, db: Session, limit: int = 5) -> List[TopPerformer]: - results = db.query( - DbServer.id, - DbServer.name, - func.count(ServerMetric.id).label('execution_count'), - func.avg(ServerMetric.response_time).label('avg_response_time'), - (func.sum(case((ServerMetric.is_success, 1), else_=0)) / func.count(ServerMetric.id) * 100).label('success_rate'), - func.max(ServerMetric.timestamp).label('last_execution') # Using timestamp 
instead of created_at - ).outerjoin( - ServerMetric - ).group_by( - DbServer.id, DbServer.name - ).order_by( - desc('execution_count') - ).limit(limit).all() + """Retrieve the top-performing servers based on execution count. + + Queries the database to get servers with their metrics, ordered by the number of executions + in descending order. Returns a list of TopPerformer objects containing server details and + performance metrics. + + Args: + db (Session): Database session for querying server metrics. + limit (int, optional): Maximum number of servers to return. Defaults to 5. + + Returns: + List[TopPerformer]: A list of TopPerformer objects, each containing: + - id: Server ID. + - name: Server name. + - execution_count: Total number of executions. + - avg_response_time: Average response time in seconds, or None if no metrics. + - success_rate: Success rate percentage, or None if no metrics. + - last_execution: Timestamp of the last execution, or None if no metrics. + """ + results = ( + db.query( + DbServer.id, + DbServer.name, + func.count(ServerMetric.id).label("execution_count"),# pylint: disable=not-callable + func.avg(ServerMetric.response_time).label("avg_response_time"), + (func.sum(case((ServerMetric.is_success, 1), else_=0)) / func.count(ServerMetric.id) * 100).label("success_rate"), + func.max(ServerMetric.timestamp).label("last_execution"), # Using timestamp instead of created_at + ) + .outerjoin(ServerMetric) + .group_by(DbServer.id, DbServer.name) + .order_by(desc("execution_count")) + .limit(limit) + .all() + ) return [ TopPerformer( @@ -151,8 +170,9 @@ async def get_top_servers(self, db: Session, limit: int = 5) -> List[TopPerforme execution_count=result.execution_count or 0, avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, success_rate=float(result.success_rate) if result.success_rate else None, - last_execution=result.last_execution - ) for result in results + last_execution=result.last_execution, + ) + for 
result in results ] def _convert_server_to_read(self, server: DbServer) -> ServerRead: diff --git a/mcpgateway/services/tool_service.py b/mcpgateway/services/tool_service.py index 258b1e10..f1ba3783 100644 --- a/mcpgateway/services/tool_service.py +++ b/mcpgateway/services/tool_service.py @@ -194,22 +194,41 @@ async def shutdown(self) -> None: await self._http_client.aclose() logger.info("Tool service shutdown complete") - async def get_top_tools(self, db: Session, limit: int = 5) -> List[TopPerformer]: - results = db.query( - DbTool.id, - DbTool.name, - func.count(ToolMetric.id).label('execution_count'), - func.avg(ToolMetric.response_time).label('avg_response_time'), - (func.sum(case((ToolMetric.is_success , 1), else_=0)) / func.count(ToolMetric.id) * 100).label('success_rate'), - func.max(ToolMetric.timestamp).label('last_execution') - ).outerjoin( - ToolMetric - ).group_by( - DbTool.id, DbTool.name - ).order_by( - desc('execution_count') - ).limit(limit).all() + """Retrieve the top-performing tools based on execution count. + + Queries the database to get tools with their metrics, ordered by the number of executions + in descending order. Returns a list of TopPerformer objects containing tool details and + performance metrics. + + Args: + db (Session): Database session for querying tool metrics. + limit (int, optional): Maximum number of tools to return. Defaults to 5. + + Returns: + List[TopPerformer]: A list of TopPerformer objects, each containing: + - id: Tool ID. + - name: Tool name. + - execution_count: Total number of executions. + - avg_response_time: Average response time in seconds, or None if no metrics. + - success_rate: Success rate percentage, or None if no metrics. + - last_execution: Timestamp of the last execution, or None if no metrics. 
+ """ + results = ( + db.query( + DbTool.id, + DbTool.name, + func.count(ToolMetric.id).label("execution_count"),# pylint: disable=not-callable + func.avg(ToolMetric.response_time).label("avg_response_time"), + (func.sum(case((ToolMetric.is_success, 1), else_=0)) / func.count(ToolMetric.id) * 100).label("success_rate"), + func.max(ToolMetric.timestamp).label("last_execution"), + ) + .outerjoin(ToolMetric) + .group_by(DbTool.id, DbTool.name) + .order_by(desc("execution_count")) + .limit(limit) + .all() + ) return [ TopPerformer( @@ -218,9 +237,10 @@ async def get_top_tools(self, db: Session, limit: int = 5) -> List[TopPerformer] execution_count=result.execution_count or 0, avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, success_rate=float(result.success_rate) if result.success_rate else None, - last_execution=result.last_execution - ) for result in results - ] + last_execution=result.last_execution, + ) + for result in results + ] def _convert_tool_to_read(self, tool: DbTool) -> ToolRead: """Converts a DbTool instance into a ToolRead model, including aggregated metrics and diff --git a/tests/unit/mcpgateway/test_admin.py b/tests/unit/mcpgateway/test_admin.py index d711caa6..9559c86e 100644 --- a/tests/unit/mcpgateway/test_admin.py +++ b/tests/unit/mcpgateway/test_admin.py @@ -41,7 +41,8 @@ admin_edit_server, admin_edit_tool, admin_get_gateway, - admin_get_metrics, + # admin_get_metrics, + get_aggregated_metrics, admin_get_prompt, admin_get_resource, admin_get_server, @@ -1025,7 +1026,10 @@ async def test_admin_get_metrics_with_nulls(self, mock_prompt_metrics, mock_serv mock_server_metrics.return_value = None # No metrics available mock_prompt_metrics.return_value = None - result = await admin_get_metrics(mock_db, "test-user") + # result = await admin_get_metrics(mock_db, "test-user") + result = await get_aggregated_metrics(mock_db, "test-user") + + assert result["tools"].total_executions == 0 assert 
result["resources"].total_executions == 100 From edb39207e6a616dd94fef01c78e6275946e0bb0c Mon Sep 17 00:00:00 2001 From: IrushaBasukala Date: Mon, 4 Aug 2025 16:58:13 +0545 Subject: [PATCH 03/11] fixing build issues Signed-off-by: IrushaBasukala --- mcpgateway/services/prompt_service.py | 8 +- mcpgateway/services/resource_service.py | 8 +- mcpgateway/services/server_service.py | 8 +- mcpgateway/services/tool_service.py | 8 +- mcpgateway/static/admin.js | 453 +++++++++++++++--------- 5 files changed, 292 insertions(+), 193 deletions(-) diff --git a/mcpgateway/services/prompt_service.py b/mcpgateway/services/prompt_service.py index fe218735..c8ceedfd 100644 --- a/mcpgateway/services/prompt_service.py +++ b/mcpgateway/services/prompt_service.py @@ -163,10 +163,10 @@ async def get_top_prompts(self, db: Session, limit: int = 5) -> List[TopPerforme db.query( DbPrompt.id, DbPrompt.name, - func.count(PromptMetric.id).label("execution_count"),# pylint: disable=not-callable - func.avg(PromptMetric.response_time).label("avg_response_time"), - (func.sum(case((PromptMetric.is_success, 1), else_=0)) / func.count(PromptMetric.id) * 100).label("success_rate"), - func.max(PromptMetric.timestamp).label("last_execution"), + func.count(PromptMetric.id).label("execution_count"), # pylint: disable=not-callable + func.avg(PromptMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable + (func.sum(case((PromptMetric.is_success, 1), else_=0)) / func.count(PromptMetric.id) * 100).label("success_rate"), # pylint: disable=not-callable + func.max(PromptMetric.timestamp).label("last_execution"), # pylint: disable=not-callable ) .outerjoin(PromptMetric) .group_by(DbPrompt.id, DbPrompt.name) diff --git a/mcpgateway/services/resource_service.py b/mcpgateway/services/resource_service.py index e60c08f4..db6ba41e 100644 --- a/mcpgateway/services/resource_service.py +++ b/mcpgateway/services/resource_service.py @@ -143,10 +143,10 @@ async def get_top_resources(self, db: Session, 
limit: int = 5) -> List[TopPerfor db.query( DbResource.id, DbResource.uri.label("name"), # Using URI as the name field for TopPerformer - func.count(ResourceMetric.id).label("execution_count"),# pylint: disable=not-callable - func.avg(ResourceMetric.response_time).label("avg_response_time"), - (func.sum(case((ResourceMetric.is_success, 1), else_=0)) / func.count(ResourceMetric.id) * 100).label("success_rate"), - func.max(ResourceMetric.timestamp).label("last_execution"), + func.count(ResourceMetric.id).label("execution_count"), # pylint: disable=not-callable + func.avg(ResourceMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable + (func.sum(case((ResourceMetric.is_success, 1), else_=0)) / func.count(ResourceMetric.id) * 100).label("success_rate"), # pylint: disable=not-callable + func.max(ResourceMetric.timestamp).label("last_execution"), # pylint: disable=not-callable ) .outerjoin(ResourceMetric) .group_by(DbResource.id, DbResource.uri) diff --git a/mcpgateway/services/server_service.py b/mcpgateway/services/server_service.py index b6d8e798..af536c61 100644 --- a/mcpgateway/services/server_service.py +++ b/mcpgateway/services/server_service.py @@ -151,10 +151,10 @@ async def get_top_servers(self, db: Session, limit: int = 5) -> List[TopPerforme db.query( DbServer.id, DbServer.name, - func.count(ServerMetric.id).label("execution_count"),# pylint: disable=not-callable - func.avg(ServerMetric.response_time).label("avg_response_time"), - (func.sum(case((ServerMetric.is_success, 1), else_=0)) / func.count(ServerMetric.id) * 100).label("success_rate"), - func.max(ServerMetric.timestamp).label("last_execution"), # Using timestamp instead of created_at + func.count(ServerMetric.id).label("execution_count"), # pylint: disable=not-callable + func.avg(ServerMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable + (func.sum(case((ServerMetric.is_success, 1), else_=0)) / func.count(ServerMetric.id) * 
100).label("success_rate"), # pylint: disable=not-callable + func.max(ServerMetric.timestamp).label("last_execution"), # pylint: disable=not-callable ) .outerjoin(ServerMetric) .group_by(DbServer.id, DbServer.name) diff --git a/mcpgateway/services/tool_service.py b/mcpgateway/services/tool_service.py index f1ba3783..aef8fd25 100644 --- a/mcpgateway/services/tool_service.py +++ b/mcpgateway/services/tool_service.py @@ -218,10 +218,10 @@ async def get_top_tools(self, db: Session, limit: int = 5) -> List[TopPerformer] db.query( DbTool.id, DbTool.name, - func.count(ToolMetric.id).label("execution_count"),# pylint: disable=not-callable - func.avg(ToolMetric.response_time).label("avg_response_time"), - (func.sum(case((ToolMetric.is_success, 1), else_=0)) / func.count(ToolMetric.id) * 100).label("success_rate"), - func.max(ToolMetric.timestamp).label("last_execution"), + func.count(ToolMetric.id).label("execution_count"), # pylint: disable=not-callable + func.avg(ToolMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable + (func.sum(case((ToolMetric.is_success, 1), else_=0)) / func.count(ToolMetric.id) * 100).label("success_rate"), # pylint: disable=not-callable + func.max(ToolMetric.timestamp).label("last_execution"), # pylint: disable=not-callable ) .outerjoin(ToolMetric) .group_by(DbTool.id, DbTool.name) diff --git a/mcpgateway/static/admin.js b/mcpgateway/static/admin.js index 2a1d6af9..4e61b919 100644 --- a/mcpgateway/static/admin.js +++ b/mcpgateway/static/admin.js @@ -842,7 +842,6 @@ function showMetricsPlaceholder() { // ENHANCED METRICS DISPLAY with Complete System Overview // =================================================================== - function displayMetrics(data) { const metricsPanel = safeGetElement("metrics-panel"); if (!metricsPanel) { @@ -1250,18 +1249,18 @@ function extractKPIData(data) { // } function createEnhancedTopPerformersSection(topData) { try { - const section = document.createElement('div'); - section.className 
= 'bg-white rounded-lg shadow p-6 dark:bg-gray-800'; - - const title = document.createElement('h3'); - title.className = 'text-lg font-medium mb-4 dark:text-gray-200'; - title.textContent = 'Top Performers'; - title.setAttribute('aria-label', 'Top Performers Section'); + const section = document.createElement("div"); + section.className = "bg-white rounded-lg shadow p-6 dark:bg-gray-800"; + + const title = document.createElement("h3"); + title.className = "text-lg font-medium mb-4 dark:text-gray-200"; + title.textContent = "Top Performers"; + title.setAttribute("aria-label", "Top Performers Section"); section.appendChild(title); - + // Loading skeleton - const skeleton = document.createElement('div'); - skeleton.className = 'animate-pulse space-y-4'; + const skeleton = document.createElement("div"); + skeleton.className = "animate-pulse space-y-4"; skeleton.innerHTML = `
@@ -1269,171 +1268,253 @@ function createEnhancedTopPerformersSection(topData) {
`; section.appendChild(skeleton); - + // Tabs - const tabsContainer = document.createElement('div'); - tabsContainer.className = 'border-b border-gray-200 dark:border-gray-700'; - const tabList = document.createElement('nav'); - tabList.className = '-mb-px flex space-x-8 overflow-x-auto'; - tabList.setAttribute('aria-label', 'Top Performers Tabs'); - - const entityTypes = ['tools', 'resources', 'prompts', 'gateways', 'servers']; + const tabsContainer = document.createElement("div"); + tabsContainer.className = + "border-b border-gray-200 dark:border-gray-700"; + const tabList = document.createElement("nav"); + tabList.className = "-mb-px flex space-x-8 overflow-x-auto"; + tabList.setAttribute("aria-label", "Top Performers Tabs"); + + const entityTypes = [ + "tools", + "resources", + "prompts", + "gateways", + "servers", + ]; entityTypes.forEach((type, index) => { if (topData[type] && Array.isArray(topData[type])) { const tab = createTab(type, index === 0); tabList.appendChild(tab); } }); - + tabsContainer.appendChild(tabList); section.appendChild(tabsContainer); - + // Content panels - const contentContainer = document.createElement('div'); - contentContainer.className = 'mt-4'; - + const contentContainer = document.createElement("div"); + contentContainer.className = "mt-4"; + entityTypes.forEach((type, index) => { if (topData[type] && Array.isArray(topData[type])) { - const panel = createTopPerformersTable(type, topData[type], index === 0); + const panel = createTopPerformersTable( + type, + topData[type], + index === 0, + ); contentContainer.appendChild(panel); } }); - + section.appendChild(contentContainer); - + // Remove skeleton once data is loaded setTimeout(() => skeleton.remove(), 500); // Simulate async data load - + // Export button - const exportButton = document.createElement('button'); - exportButton.className = 'mt-4 bg-indigo-600 text-white px-4 py-2 rounded hover:bg-indigo-700 dark:bg-indigo-500 dark:hover:bg-indigo-600'; - exportButton.textContent 
= 'Export Metrics'; + const exportButton = document.createElement("button"); + exportButton.className = + "mt-4 bg-indigo-600 text-white px-4 py-2 rounded hover:bg-indigo-700 dark:bg-indigo-500 dark:hover:bg-indigo-600"; + exportButton.textContent = "Export Metrics"; exportButton.onclick = () => exportMetricsToCSV(topData); section.appendChild(exportButton); - + return section; } catch (error) { - console.error('Error creating enhanced top performers section:', error); - showErrorMessage('Failed to load top performers section'); - return document.createElement('div'); + console.error("Error creating enhanced top performers section:", error); + showErrorMessage("Failed to load top performers section"); + return document.createElement("div"); } } +function calculateSuccessRate(item) { + const total = item.execution_count || item.executions || 0; + const successful = item.successful_count || item.successfulExecutions || 0; + return total > 0 ? Math.round((successful / total) * 100) : 0; +} + +function formatNumber(num) { + return new Intl.NumberFormat().format(num); +} + +function formatLastUsed(timestamp) { + if (!timestamp) { + return "Never"; + } + + const date = new Date(timestamp); + const now = new Date(); + const diffMs = now - date; + const diffMins = Math.floor(diffMs / 60000); + + if (diffMins < 1) { + return "Just now"; + } + if (diffMins < 60) { + return `${diffMins} min ago`; + } + if (diffMins < 1440) { + return `${Math.floor(diffMins / 60)} hours ago`; + } + if (diffMins < 10080) { + return `${Math.floor(diffMins / 1440)} days ago`; + } + return date.toLocaleDateString(); +} function createTopPerformersTable(entityType, data, isActive) { - const panel = document.createElement('div'); + const panel = document.createElement("div"); panel.id = `top-${entityType}-panel`; - panel.className = `transition-opacity duration-300 ${isActive ? 
'opacity-100' : 'hidden opacity-0'}`; - panel.setAttribute('role', 'tabpanel'); - panel.setAttribute('aria-labelledby', `top-${entityType}-tab`); - + panel.className = `transition-opacity duration-300 ${isActive ? "opacity-100" : "hidden opacity-0"}`; + panel.setAttribute("role", "tabpanel"); + panel.setAttribute("aria-labelledby", `top-${entityType}-tab`); + if (data.length === 0) { - const emptyState = document.createElement('p'); - emptyState.className = 'text-gray-500 dark:text-gray-400 text-center py-4'; + const emptyState = document.createElement("p"); + emptyState.className = + "text-gray-500 dark:text-gray-400 text-center py-4"; emptyState.textContent = `No ${entityType} data available`; panel.appendChild(emptyState); return panel; } - + // Responsive table wrapper - const tableWrapper = document.createElement('div'); - tableWrapper.className = 'overflow-x-auto sm:overflow-x-visible'; - - const table = document.createElement('table'); - table.className = 'min-w-full divide-y divide-gray-200 dark:divide-gray-700'; - + const tableWrapper = document.createElement("div"); + tableWrapper.className = "overflow-x-auto sm:overflow-x-visible"; + + const table = document.createElement("table"); + table.className = + "min-w-full divide-y divide-gray-200 dark:divide-gray-700"; + // Table header - const thead = document.createElement('thead'); - thead.className = 'bg-gray-50 dark:bg-gray-700 hidden sm:table-header-group'; - const headerRow = document.createElement('tr'); - const headers = ['Rank', 'Name', 'Executions', 'Avg Response Time', 'Success Rate', 'Last Used']; - + const thead = document.createElement("thead"); + thead.className = + "bg-gray-50 dark:bg-gray-700 hidden sm:table-header-group"; + const headerRow = document.createElement("tr"); + const headers = [ + "Rank", + "Name", + "Executions", + "Avg Response Time", + "Success Rate", + "Last Used", + ]; + headers.forEach((headerText, index) => { - const th = document.createElement('th'); - th.className = 'px-6 
py-3 text-left text-xs font-medium text-gray-500 dark:text-gray-300 uppercase tracking-wider'; - th.setAttribute('scope', 'col'); + const th = document.createElement("th"); + th.className = + "px-6 py-3 text-left text-xs font-medium text-gray-500 dark:text-gray-300 uppercase tracking-wider"; + th.setAttribute("scope", "col"); th.textContent = headerText; - if (index === 0) th.setAttribute('aria-sort', 'ascending'); + if (index === 0) { + th.setAttribute("aria-sort", "ascending"); + } headerRow.appendChild(th); }); - + thead.appendChild(headerRow); table.appendChild(thead); - + // Table body - const tbody = document.createElement('tbody'); - tbody.className = 'bg-white dark:bg-gray-800 divide-y divide-gray-200 dark:divide-gray-700'; - + const tbody = document.createElement("tbody"); + tbody.className = + "bg-white dark:bg-gray-800 divide-y divide-gray-200 dark:divide-gray-700"; + // Pagination (if > 5 items) const paginatedData = data.slice(0, 5); // Limit to top 5 paginatedData.forEach((item, index) => { - const row = document.createElement('tr'); - row.className = 'hover:bg-gray-50 dark:hover:bg-gray-700 transition-colors duration-200'; - + const row = document.createElement("tr"); + row.className = + "hover:bg-gray-50 dark:hover:bg-gray-700 transition-colors duration-200"; + // Rank - const rankCell = document.createElement('td'); - rankCell.className = 'px-6 py-4 whitespace-nowrap text-sm font-medium text-gray-900 dark:text-gray-100 sm:px-6 sm:py-4'; - const rankBadge = document.createElement('span'); + const rankCell = document.createElement("td"); + rankCell.className = + "px-6 py-4 whitespace-nowrap text-sm font-medium text-gray-900 dark:text-gray-100 sm:px-6 sm:py-4"; + const rankBadge = document.createElement("span"); rankBadge.className = `inline-flex items-center justify-center w-6 h-6 rounded-full ${ - index === 0 ? 'bg-yellow-400 text-yellow-900' : - index === 1 ? 'bg-gray-300 text-gray-900' : - index === 2 ? 
'bg-orange-400 text-orange-900' : - 'bg-gray-100 text-gray-600' + index === 0 + ? "bg-yellow-400 text-yellow-900" + : index === 1 + ? "bg-gray-300 text-gray-900" + : index === 2 + ? "bg-orange-400 text-orange-900" + : "bg-gray-100 text-gray-600" }`; rankBadge.textContent = index + 1; - rankBadge.setAttribute('aria-label', `Rank ${index + 1}`); + rankBadge.setAttribute("aria-label", `Rank ${index + 1}`); rankCell.appendChild(rankBadge); row.appendChild(rankCell); - + // Name (clickable for drill-down) - const nameCell = document.createElement('td'); - nameCell.className = 'px-6 py-4 whitespace-nowrap text-sm text-indigo-600 dark:text-indigo-400 cursor-pointer'; - nameCell.textContent = escapeHtml(item.name || 'Unknown'); - nameCell.onclick = () => showDetailedMetrics(entityType, item.id); - nameCell.setAttribute('role', 'button'); - nameCell.setAttribute('aria-label', `View details for ${item.name || 'Unknown'}`); + const nameCell = document.createElement("td"); + nameCell.className = + "px-6 py-4 whitespace-nowrap text-sm text-indigo-600 dark:text-indigo-400 cursor-pointer"; + nameCell.textContent = escapeHtml(item.name || "Unknown"); + // nameCell.onclick = () => showDetailedMetrics(entityType, item.id); + nameCell.setAttribute("role", "button"); + nameCell.setAttribute( + "aria-label", + `View details for ${item.name || "Unknown"}`, + ); row.appendChild(nameCell); - + // Executions - const execCell = document.createElement('td'); - execCell.className = 'px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4'; - execCell.textContent = formatNumber(item.execution_count || item.executions || 0); + const execCell = document.createElement("td"); + execCell.className = + "px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4"; + execCell.textContent = formatNumber( + item.execution_count || item.executions || 0, + ); row.appendChild(execCell); - + // Avg Response Time - const avgTimeCell = 
document.createElement('td'); - avgTimeCell.className = 'px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4'; + const avgTimeCell = document.createElement("td"); + avgTimeCell.className = + "px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4"; const avgTime = item.avg_response_time || item.avgResponseTime; - avgTimeCell.textContent = avgTime ? `${Math.round(avgTime)}ms` : 'N/A'; + avgTimeCell.textContent = avgTime ? `${Math.round(avgTime)}ms` : "N/A"; row.appendChild(avgTimeCell); - + // Success Rate - const successCell = document.createElement('td'); - successCell.className = 'px-6 py-4 whitespace-nowrap text-sm sm:px-6 sm:py-4'; + const successCell = document.createElement("td"); + successCell.className = + "px-6 py-4 whitespace-nowrap text-sm sm:px-6 sm:py-4"; const successRate = calculateSuccessRate(item); - const successBadge = document.createElement('span'); + const successBadge = document.createElement("span"); successBadge.className = `inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium ${ - successRate >= 95 ? 'bg-green-100 text-green-800 dark:bg-green-800 dark:text-green-100' : - successRate >= 80 ? 'bg-yellow-100 text-yellow-800 dark:bg-yellow-800 dark:text-yellow-100' : - 'bg-red-100 text-red-800 dark:bg-red-800 dark:text-red-100' + successRate >= 95 + ? "bg-green-100 text-green-800 dark:bg-green-800 dark:text-green-100" + : successRate >= 80 + ? 
"bg-yellow-100 text-yellow-800 dark:bg-yellow-800 dark:text-yellow-100" + : "bg-red-100 text-red-800 dark:bg-red-800 dark:text-red-100" }`; successBadge.textContent = `${successRate}%`; - successBadge.setAttribute('aria-label', `Success rate: ${successRate}%`); + successBadge.setAttribute( + "aria-label", + `Success rate: ${successRate}%`, + ); successCell.appendChild(successBadge); row.appendChild(successCell); - + // Last Used - const lastUsedCell = document.createElement('td'); - lastUsedCell.className = 'px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4'; - lastUsedCell.textContent = formatLastUsed(item.last_execution || item.lastExecution); + const lastUsedCell = document.createElement("td"); + lastUsedCell.className = + "px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4"; + lastUsedCell.textContent = formatLastUsed( + item.last_execution || item.lastExecution, + ); row.appendChild(lastUsedCell); - + tbody.appendChild(row); }); - + table.appendChild(tbody); tableWrapper.appendChild(table); panel.appendChild(tableWrapper); - + // Pagination controls (if needed) if (data.length > 5) { const pagination = createPaginationControls(data.length, 5, (page) => { @@ -1441,23 +1522,23 @@ function createTopPerformersTable(entityType, data, isActive) { }); panel.appendChild(pagination); } - + return panel; } function createTab(type, isActive) { - const tab = document.createElement('a'); - tab.href = '#'; + const tab = document.createElement("a"); + tab.href = "#"; tab.id = `top-${type}-tab`; tab.className = `${ - isActive - ? 'border-indigo-500 text-indigo-600 dark:text-indigo-400' - : 'border-transparent text-gray-500 hover:text-gray-700 hover:border-gray-300 dark:text-gray-400 dark:hover:text-gray-300' + isActive + ? 
"border-indigo-500 text-indigo-600 dark:text-indigo-400" + : "border-transparent text-gray-500 hover:text-gray-700 hover:border-gray-300 dark:text-gray-400 dark:hover:text-gray-300" } whitespace-nowrap py-4 px-1 border-b-2 font-medium text-sm capitalize transition-colors duration-200 sm:py-4 sm:px-1`; tab.textContent = type; - tab.setAttribute('role', 'tab'); - tab.setAttribute('aria-controls', `top-${type}-panel`); - tab.setAttribute('aria-selected', isActive.toString()); + tab.setAttribute("role", "tab"); + tab.setAttribute("aria-controls", `top-${type}-panel`); + tab.setAttribute("aria-selected", isActive.toString()); tab.onclick = (e) => { e.preventDefault(); showTopPerformerTab(type); @@ -1466,85 +1547,103 @@ function createTab(type, isActive) { } function showTopPerformerTab(activeType) { - const entityTypes = ['tools', 'resources', 'prompts', 'gateways', 'servers']; - entityTypes.forEach(type => { + const entityTypes = [ + "tools", + "resources", + "prompts", + "gateways", + "servers", + ]; + entityTypes.forEach((type) => { const panel = document.getElementById(`top-${type}-panel`); const tab = document.getElementById(`top-${type}-tab`); if (panel) { - panel.classList.toggle('hidden', type !== activeType); - panel.classList.toggle('opacity-100', type === activeType); - panel.classList.toggle('opacity-0', type !== activeType); - panel.setAttribute('aria-hidden', type !== activeType); + panel.classList.toggle("hidden", type !== activeType); + panel.classList.toggle("opacity-100", type === activeType); + panel.classList.toggle("opacity-0", type !== activeType); + panel.setAttribute("aria-hidden", type !== activeType); } if (tab) { - tab.classList.toggle('border-indigo-500', type === activeType); - tab.classList.toggle('text-indigo-600', type === activeType); - tab.classList.toggle('dark:text-indigo-400', type === activeType); - tab.classList.toggle('border-transparent', type !== activeType); - tab.classList.toggle('text-gray-500', type !== activeType); - 
tab.setAttribute('aria-selected', type === activeType); + tab.classList.toggle("border-indigo-500", type === activeType); + tab.classList.toggle("text-indigo-600", type === activeType); + tab.classList.toggle("dark:text-indigo-400", type === activeType); + tab.classList.toggle("border-transparent", type !== activeType); + tab.classList.toggle("text-gray-500", type !== activeType); + tab.setAttribute("aria-selected", type === activeType); } }); } function createPaginationControls(totalItems, itemsPerPage, onPageChange) { - const pagination = document.createElement('div'); - pagination.className = 'mt-4 flex justify-end space-x-2'; + const pagination = document.createElement("div"); + pagination.className = "mt-4 flex justify-end space-x-2"; const totalPages = Math.ceil(totalItems / itemsPerPage); - + for (let page = 1; page <= totalPages; page++) { - const button = document.createElement('button'); - button.className = `px-3 py-1 rounded ${page === 1 ? 'bg-indigo-600 text-white' : 'bg-gray-200 text-gray-700 dark:bg-gray-700 dark:text-gray-300'}`; + const button = document.createElement("button"); + button.className = `px-3 py-1 rounded ${page === 1 ? "bg-indigo-600 text-white" : "bg-gray-200 text-gray-700 dark:bg-gray-700 dark:text-gray-300"}`; button.textContent = page; button.onclick = () => { onPageChange(page); - pagination.querySelectorAll('button').forEach(btn => { - btn.className = `px-3 py-1 rounded ${btn === button ? 'bg-indigo-600 text-white' : 'bg-gray-200 text-gray-700 dark:bg-gray-700 dark:text-gray-300'}`; + pagination.querySelectorAll("button").forEach((btn) => { + btn.className = `px-3 py-1 rounded ${btn === button ? 
"bg-indigo-600 text-white" : "bg-gray-200 text-gray-700 dark:bg-gray-700 dark:text-gray-300"}`; }); }; pagination.appendChild(button); } - + return pagination; } function updateTableRows(panel, entityType, data, page) { - const tbody = panel.querySelector('tbody'); - tbody.innerHTML = ''; + const tbody = panel.querySelector("tbody"); + tbody.innerHTML = ""; const start = (page - 1) * 5; const paginatedData = data.slice(start, start + 5); - + paginatedData.forEach((item, index) => { - const row = document.createElement('tr'); + const row = document.createElement("tr"); // ... (same row creation logic as in createTopPerformersTable) tbody.appendChild(row); }); } function exportMetricsToCSV(topData) { - const headers = ['Entity Type', 'Rank', 'Name', 'Executions', 'Avg Response Time', 'Success Rate', 'Last Used']; + const headers = [ + "Entity Type", + "Rank", + "Name", + "Executions", + "Avg Response Time", + "Success Rate", + "Last Used", + ]; const rows = []; - - ['tools', 'resources', 'prompts', 'gateways', 'servers'].forEach(type => { + + ["tools", "resources", "prompts", "gateways", "servers"].forEach((type) => { if (topData[type] && Array.isArray(topData[type])) { topData[type].forEach((item, index) => { rows.push([ type, index + 1, - `"${escapeHtml(item.name || 'Unknown')}"`, + `"${escapeHtml(item.name || "Unknown")}"`, formatNumber(item.execution_count || item.executions || 0), - item.avg_response_time || item.avgResponseTime ? `${Math.round(item.avg_response_time || item.avgResponseTime)}ms` : 'N/A', + item.avg_response_time || item.avgResponseTime + ? 
`${Math.round(item.avg_response_time || item.avgResponseTime)}ms` + : "N/A", `${calculateSuccessRate(item)}%`, - formatLastUsed(item.last_execution || item.lastExecution) + formatLastUsed(item.last_execution || item.lastExecution), ]); }); } }); - - const csv = [headers.join(','), ...rows.map(row => row.join(','))].join('\n'); - const blob = new Blob([csv], { type: 'text/csv' }); + + const csv = [headers.join(","), ...rows.map((row) => row.join(","))].join( + "\n", + ); + const blob = new Blob([csv], { type: "text/csv" }); const url = URL.createObjectURL(blob); - const a = document.createElement('a'); + const a = document.createElement("a"); a.href = url; a.download = `top_performers_${new Date().toISOString()}.csv`; a.click(); @@ -1554,43 +1653,43 @@ function exportMetricsToCSV(topData) { /** * SECURITY: Create top item card with safe content handling */ -function createTopItemCard(title, items) { - try { - const card = document.createElement("div"); - card.className = "bg-gray-50 rounded p-4 dark:bg-gray-700"; +// function createTopItemCard(title, items) { +// try { +// const card = document.createElement("div"); +// card.className = "bg-gray-50 rounded p-4 dark:bg-gray-700"; - const cardTitle = document.createElement("h4"); - cardTitle.className = "font-medium mb-2 dark:text-gray-200"; - cardTitle.textContent = `Top ${title}`; - card.appendChild(cardTitle); +// const cardTitle = document.createElement("h4"); +// cardTitle.className = "font-medium mb-2 dark:text-gray-200"; +// cardTitle.textContent = `Top ${title}`; +// card.appendChild(cardTitle); - const list = document.createElement("ul"); - list.className = "space-y-1"; +// const list = document.createElement("ul"); +// list.className = "space-y-1"; - items.slice(0, 5).forEach((item) => { - const listItem = document.createElement("li"); - listItem.className = - "text-sm text-gray-600 dark:text-gray-300 flex justify-between"; +// items.slice(0, 5).forEach((item) => { +// const listItem = 
document.createElement("li"); +// listItem.className = +// "text-sm text-gray-600 dark:text-gray-300 flex justify-between"; - const nameSpan = document.createElement("span"); - nameSpan.textContent = item.name || "Unknown"; +// const nameSpan = document.createElement("span"); +// nameSpan.textContent = item.name || "Unknown"; - const countSpan = document.createElement("span"); - countSpan.className = "font-medium"; - countSpan.textContent = String(item.executions || 0); +// const countSpan = document.createElement("span"); +// countSpan.className = "font-medium"; +// countSpan.textContent = String(item.executions || 0); - listItem.appendChild(nameSpan); - listItem.appendChild(countSpan); - list.appendChild(listItem); - }); +// listItem.appendChild(nameSpan); +// listItem.appendChild(countSpan); +// list.appendChild(listItem); +// }); - card.appendChild(list); - return card; - } catch (error) { - console.error("Error creating top item card:", error); - return document.createElement("div"); // Safe fallback - } -} +// card.appendChild(list); +// return card; +// } catch (error) { +// console.error("Error creating top item card:", error); +// return document.createElement("div"); // Safe fallback +// } +// } /** * SECURITY: Create performance metrics card with safe display From 91edccfa3326919f43c35c9bb979254b3c5b4be9 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Sun, 10 Aug 2025 00:40:54 +0100 Subject: [PATCH 04/11] Fix many issues, rebase, still cannot properly add server Signed-off-by: Mihai Criveti --- alembic.ini | 147 ----- alembic/README | 172 ------ alembic/env.py | 187 ------ alembic/script.py.mako | 28 - .../b77ca9d2de7e_uuid_pk_and_slug_refactor.py | 552 ------------------ mcpgateway/admin.py | 6 +- mcpgateway/alembic/README.md | 2 +- mcpgateway/alembic/script.py.mako | 2 +- .../e4fc04d1a442_add_annotations_to_tables.py | 2 +- ...490e949b1_add_improved_status_to_tables.py | 2 +- mcpgateway/migrations/env.py | 84 --- mcpgateway/schemas.py | 1 + 
mcpgateway/services/prompt_service.py | 40 +- mcpgateway/services/resource_service.py | 49 +- mcpgateway/services/server_service.py | 40 +- mcpgateway/services/tool_service.py | 47 +- tests/e2e/test_main_apis.py | 4 +- .../mcpgateway/services/test_tool_service.py | 26 +- tests/unit/mcpgateway/test_admin.py | 20 +- 19 files changed, 126 insertions(+), 1285 deletions(-) delete mode 100644 alembic.ini delete mode 100644 alembic/README delete mode 100644 alembic/env.py delete mode 100644 alembic/script.py.mako delete mode 100644 alembic/versions/b77ca9d2de7e_uuid_pk_and_slug_refactor.py rename {alembic => mcpgateway/alembic}/versions/e4fc04d1a442_add_annotations_to_tables.py (97%) rename {alembic => mcpgateway/alembic}/versions/e75490e949b1_add_improved_status_to_tables.py (97%) delete mode 100644 mcpgateway/migrations/env.py diff --git a/alembic.ini b/alembic.ini deleted file mode 100644 index 07918167..00000000 --- a/alembic.ini +++ /dev/null @@ -1,147 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts. -# this is typically a path given in POSIX (e.g. forward slashes) -# format, relative to the token %(here)s which refers to the location of this -# ini file -script_location = %(here)s/alembic - -# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s -# Uncomment the line below if you want the files to be prepended with date and time -# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file -# for all available tokens -# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s - -# sys.path path, will be prepended to sys.path if present. -# defaults to the current working directory. for multiple paths, the path separator -# is defined by "path_separator" below. -prepend_sys_path = . - - -# timezone to use when rendering the date within the migration file -# as well as the filename. 
-# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library. -# Any required deps can installed by adding `alembic[tz]` to the pip requirements -# string value is passed to ZoneInfo() -# leave blank for localtime -# timezone = - -# max length of characters to apply to the "slug" field -# truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version location specification; This defaults -# to /versions. When using multiple version -# directories, initial revisions must be specified with --version-path. -# The path separator used here should be the separator specified by "path_separator" -# below. -# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions - -# path_separator; This indicates what character is used to split lists of file -# paths, including version_locations and prepend_sys_path within configparser -# files such as alembic.ini. -# The default rendered in new alembic.ini files is "os", which uses os.pathsep -# to provide os-dependent path splitting. -# -# Note that in order to support legacy alembic.ini files, this default does NOT -# take place if path_separator is not present in alembic.ini. If this -# option is omitted entirely, fallback logic is as follows: -# -# 1. Parsing of the version_locations option falls back to using the legacy -# "version_path_separator" key, which if absent then falls back to the legacy -# behavior of splitting on spaces and/or commas. -# 2. Parsing of the prepend_sys_path option falls back to the legacy -# behavior of splitting on spaces, commas, or colons. 
-# -# Valid values for path_separator are: -# -# path_separator = : -# path_separator = ; -# path_separator = space -# path_separator = newline -# -# Use os.pathsep. Default configuration used for new projects. -path_separator = os - -# set to 'true' to search source files recursively -# in each "version_locations" directory -# new in Alembic version 1.10 -# recursive_version_locations = false - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -# database URL. This is consumed by the user-maintained env.py script only. -# other means of configuring database URLs may be customized within the env.py -# file. -sqlalchemy.url = driver://user:pass@localhost/dbname - - -[post_write_hooks] -# post_write_hooks defines scripts or Python functions that are run -# on newly generated revision scripts. See the documentation for further -# detail and examples - -# format using "black" - use the console_scripts runner, against the "black" entrypoint -# hooks = black -# black.type = console_scripts -# black.entrypoint = black -# black.options = -l 79 REVISION_SCRIPT_FILENAME - -# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module -# hooks = ruff -# ruff.type = module -# ruff.module = ruff -# ruff.options = check --fix REVISION_SCRIPT_FILENAME - -# Alternatively, use the exec runner to execute a binary found on your PATH -# hooks = ruff -# ruff.type = exec -# ruff.executable = ruff -# ruff.options = check --fix REVISION_SCRIPT_FILENAME - -# Logging configuration. This is also consumed by the user-maintained -# env.py script only. 
-[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARNING -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARNING -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/alembic/README b/alembic/README deleted file mode 100644 index 1738eac7..00000000 --- a/alembic/README +++ /dev/null @@ -1,172 +0,0 @@ -Generic single-database configuration.# Alembic Migration Guide for `mcpgateway` - -> Creating, applying, and managing schema migrations with Alembic. - ---- - -## Table of Contents - -1. [Why Alembic?](#why-alembic) -2. [Prerequisites](#prerequisites) -3. [Directory Layout](#directory-layout) -4. [Everyday Workflow](#everyday-workflow) -5. [Helpful Make Targets](#helpful-make-targets) -6. [Troubleshooting](#troubleshooting) -7. [Further Reading](#further-reading) - ---- - -## Why Alembic? - -- **Versioned DDL** - Revisions are timestamped, diff-able, and reversible. -- **Autogeneration** - Detects model vs. DB drift and writes `op.create_table`, `op.add_column`, etc. -- **Multi-DB Support** - Works with SQLite, PostgreSQL, MySQL-anything SQLAlchemy supports. -- **Zero Runtime Cost** - Only runs when you call it (dev, CI, deploy). - ---- - -## Prerequisites - -```bash -# Activate your virtual environment first -pip install --upgrade alembic -``` - -You do not need to set up `alembic.ini`, `env.py`, or metadata wiring - they're already configured. - ---- - -## Directory Layout - -``` -alembic.ini -alembic/ -├── env.py -├── script.py.mako -└── versions/ - ├── 20250626235501_initial_schema.py - └── ... 
-``` - -* `alembic.ini`: Configuration file -* `env.py`: Connects Alembic to your models and DB settings -* `script.py.mako`: Template for new revisions (keep this!) -* `versions/`: Contains all migration scripts - ---- - -## Everyday Workflow - -> **1 Edit → 2 Revision → 3 Upgrade** - -| Step | What you do | -| ------------------------ | ----------------------------------------------------------------------------- | -| **1. Change models** | Modify SQLAlchemy models in `mcpgateway.db` or its submodules. | -| **2. Generate revision** | Run: `MSG="add users table"` then `alembic revision --autogenerate -m "$MSG"` | -| **3. Review** | Open the new file in `alembic/versions/`. Verify the operations are correct. | -| **4. Upgrade DB** | Run: `alembic upgrade head` | -| **5. Commit** | Run: `git add alembic/versions/*.py` | - -### Other Common Commands - -```bash -alembic -c mcpgateway/alembic.ini current # Show current DB revision -alembic history --verbose # Show all migrations and their order -alembic downgrade -1 # Roll back one revision -alembic downgrade # Roll back to a specific revision hash -``` - ---- - -## ✅ Make Targets: Alembic Migration Commands - -These targets help you manage database schema migrations using Alembic. - -> You must have a valid `alembic/` setup and a working SQLAlchemy model base (`Base.metadata`). - ---- - -### 💡 List all available targets (with help) - -```bash -make help -``` - -This will include the Alembic section: - -``` -# 🛢️ Alembic tasks -db-new Autogenerate revision (MSG="title") -db-up Upgrade DB to head -db-down Downgrade one step (REV=-1 or hash) -db-current Show current DB revision -db-history List the migration graph -``` - ---- - -### 🔨 Commands - -| Command | Description | -| -------------------------- | ------------------------------------------------------ | -| `make db-new MSG="..."` | Generate a new migration based on model changes. | -| `make db-up` | Apply all unapplied migrations. 
| -| `make db-down` | Roll back the latest migration (`REV=-1` by default). | -| `make db-down REV=abc1234` | Roll back to a specific revision by hash. | -| `make db-current` | Print the current revision ID applied to the database. | -| `make db-history` | Show the full migration history and graph. | - ---- - -### 📌 Examples - -```bash -# Create a new migration with a custom message -make db-new MSG="add users table" - -# Apply it to the database -make db-up - -# Downgrade the last migration -make db-down - -# Downgrade to a specific revision -make db-down REV=cf1283d7fa92 - -# Show the current applied revision -make db-current - -# Show all migration history -make db-history -``` - ---- - -### 🛑 Notes - -* You must **edit models first** before `make db-new` generates anything useful. -* Always **review generated migration files** before committing. -* Don't forget to run `make db-up` on CI or deploy if using migrations to manage schema. - ---- - -## Troubleshooting - -| Symptom | Cause / Fix | -| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | -| **Empty migration (`pass`)** | Alembic couldn't detect models. Make sure all model classes are imported before `Base.metadata` is used (already handled in your `env.py`). | -| **`Can't locate revision ...`** | You deleted or renamed a revision file that the DB is pointing to. Either restore it or run `alembic stamp base` and recreate the revision. | -| **`script.py.mako` missing** | This file is required. Run `alembic init alembic` in a temp folder and copy the missing template into your project. | -| **SQLite foreign key limitations** | SQLite doesn't allow dropping constraints. Use `create table → copy → drop` flow manually, or plan around it. | -| **DB not updating** | Did you forget to run `alembic upgrade head`? Check with `alembic -c mcpgateway/alembic.ini current`. 
| -| **Wrong DB URL or config errors** | Confirm `settings.database_url` is valid. Check `env.py` and your `.env`/config settings. Alembic ignores `alembic.ini` for URLs in your setup. | -| **Model changes not detected** | Alembic only picks up declarative models in `Base.metadata`. Ensure all models are imported and not behind `if TYPE_CHECKING:` or other lazy imports. | - ---- - -## Further Reading - -* Official docs: [https://alembic.sqlalchemy.org](https://alembic.sqlalchemy.org) -* Autogenerate docs: [https://alembic.sqlalchemy.org/en/latest/autogenerate.html](https://alembic.sqlalchemy.org/en/latest/autogenerate.html) - ---- \ No newline at end of file diff --git a/alembic/env.py b/alembic/env.py deleted file mode 100644 index ff5e0600..00000000 --- a/alembic/env.py +++ /dev/null @@ -1,187 +0,0 @@ -# -*- coding: utf-8 -*- -"""Alembic environment configuration for database migrations. - -Copyright 2025 -SPDX-License-Identifier: Apache-2.0 -Authors: Mihai Criveti, Madhav Kandukuri - -This module configures the Alembic migration environment for the MCP Gateway -application. It sets up both offline and online migration modes, configures -logging, and establishes the database connection parameters. - -The module performs the following key functions: -- Configures Alembic to locate migration scripts in the mcpgateway package -- Sets up Python logging based on the alembic.ini configuration -- Imports the SQLAlchemy metadata from the application models -- Configures the database URL from application settings -- Provides functions for running migrations in both offline and online modes - -Offline mode generates SQL scripts without connecting to the database, while -online mode executes migrations directly against a live database connection. - -Attributes: - config (Config): The Alembic configuration object loaded from alembic.ini. - target_metadata (MetaData): SQLAlchemy metadata object containing all - table definitions from the application models. 
- -Examples: - Running migrations in offline mode:: - - alembic upgrade head --sql - - Running migrations in online mode:: - - alembic upgrade head - - The module is typically not imported directly but is used by Alembic - when executing migration commands. - -Note: - This file is automatically executed by Alembic and should not be - imported or run directly by application code. -""" - -# Standard -from importlib.resources import files -from logging.config import fileConfig - -# Third-Party -from alembic import context - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -from alembic.config import Config -from sqlalchemy import engine_from_config, pool - -# First-Party -from mcpgateway.config import settings -from mcpgateway.db import Base - -# from mcpgateway.db import get_metadata -# target_metadata = get_metadata() - - -# Create config object - this is the standard way in Alembic -config = getattr(context, "config", None) or Config() - - -def _inside_alembic() -> bool: - """Detect if this module is being executed by the Alembic CLI. - - This function checks whether the current execution context is within - an Alembic migration environment. It's used to prevent migration code - from running when this module is imported for other purposes (e.g., - during testing or when importing models). - - The detection works by checking for the presence of the '_proxy' attribute - on the alembic.context object. This attribute is set internally by Alembic - when it loads and executes the env.py file during migration operations. - - Returns: - bool: True if running under Alembic CLI (e.g., during 'alembic upgrade', - 'alembic downgrade', etc.), False if imported normally by Python - code or during testing. 
- - Examples: - When running migrations:: - - $ alembic upgrade head - # _inside_alembic() returns True - - When importing in tests or application code:: - - from mcpgateway.alembic.env import target_metadata - # _inside_alembic() returns False - - Note: - This guard is crucial to prevent the migration execution code at the - bottom of this module from running during normal imports. Without it, - importing this module would attempt to run migrations every time. - """ - return getattr(context, "_proxy", None) is not None - - -config.set_main_option("script_location", str(files("mcpgateway").joinpath("alembic"))) - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -if config.config_file_name is not None: - fileConfig( - config.config_file_name, - disable_existing_loggers=False, - ) - -# First-Party -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel - -target_metadata = Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - -config.set_main_option( - "sqlalchemy.url", - settings.database_url, -) - - -def run_migrations_offline() -> None: - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, - target_metadata=target_metadata, - literal_binds=True, - dialect_opts={"paramstyle": "named"}, - ) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online() -> None: - """Run migrations in 'online' mode. 
- - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - connection = config.attributes.get("connection") - if connection is None: - connectable = engine_from_config( - config.get_section(config.config_ini_section, {}), - prefix="sqlalchemy.", - poolclass=pool.NullPool, - ) - - with connectable.connect() as connection: - context.configure(connection=connection, target_metadata=target_metadata) - - with context.begin_transaction(): - context.run_migrations() - else: - context.configure(connection=connection, target_metadata=target_metadata) - - with context.begin_transaction(): - context.run_migrations() - - -if _inside_alembic(): - if context.is_offline_mode(): - run_migrations_offline() - else: - run_migrations_online() \ No newline at end of file diff --git a/alembic/script.py.mako b/alembic/script.py.mako deleted file mode 100644 index 11016301..00000000 --- a/alembic/script.py.mako +++ /dev/null @@ -1,28 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" -from typing import Sequence, Union - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -# revision identifiers, used by Alembic. 
-revision: str = ${repr(up_revision)} -down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)} -branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} -depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} - - -def upgrade() -> None: - """Upgrade schema.""" - ${upgrades if upgrades else "pass"} - - -def downgrade() -> None: - """Downgrade schema.""" - ${downgrades if downgrades else "pass"} diff --git a/alembic/versions/b77ca9d2de7e_uuid_pk_and_slug_refactor.py b/alembic/versions/b77ca9d2de7e_uuid_pk_and_slug_refactor.py deleted file mode 100644 index 6d4edabc..00000000 --- a/alembic/versions/b77ca9d2de7e_uuid_pk_and_slug_refactor.py +++ /dev/null @@ -1,552 +0,0 @@ -# -*- coding: utf-8 -*- -"""uuid-pk_and_slug_refactor - -Revision ID: b77ca9d2de7e -Revises: -Create Date: 2025-06-26 21:29:59.117140 - -""" - -# Standard -from typing import Sequence, Union -import uuid - -# Third-Party -from alembic import op -import sqlalchemy as sa -from sqlalchemy.orm import Session - -# First-Party -from mcpgateway.config import settings -from mcpgateway.utils.create_slug import slugify - -# revision identifiers, used by Alembic. -revision: str = "b77ca9d2de7e" -down_revision: Union[str, Sequence[str], None] = None -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None - - -# ────────────────────────────────────────────────────────────────────────────── -# Helpers -# ────────────────────────────────────────────────────────────────────────────── -def _use_batch() -> bool: - """Determine if batch operations are required for the current database. - - SQLite requires batch mode for certain ALTER TABLE operations like dropping - columns or altering column types. This helper checks the database dialect - to determine if batch operations should be used. - - Returns: - bool: True if the database is SQLite (requires batch mode), False otherwise. 
- - Examples: - >>> # In a SQLite context - >>> _use_batch() # doctest: +SKIP - True - >>> # In a PostgreSQL context - >>> _use_batch() # doctest: +SKIP - False - """ - return op.get_bind().dialect.name == "sqlite" - - -# ────────────────────────────────────────────────────────────────────────────── -# Upgrade -# ────────────────────────────────────────────────────────────────────────────── -def upgrade() -> None: - """Migrate database schema from integer to UUID primary keys with slugs. - - This migration performs a comprehensive schema transformation in three stages: - - Stage 1 - Add placeholder columns: - - Adds UUID columns (id_new) to gateways, tools, and servers - - Adds slug columns for human-readable identifiers - - Adds columns to preserve original tool names before prefixing - - Stage 2 - Data migration: - - Generates UUIDs for all primary keys - - Creates slugs from names (e.g., "My Gateway" -> "my-gateway") - - Prefixes tool names with gateway slugs (e.g., "my-tool" -> "gateway-slug-my-tool") - - Updates all foreign key references to use new UUIDs - - Stage 3 - Schema finalization: - - Drops old integer columns - - Renames new UUID columns to replace old ones - - Recreates primary keys and foreign key constraints - - Adds unique constraints on slugs and URLs - - The migration is designed to work with both SQLite (using batch operations) - and other databases. It preserves all existing data relationships while - transforming the schema. - - Note: - - Skips migration if database is fresh (no gateways table) - - Uses batch operations for SQLite compatibility - - Commits data changes before schema alterations - - Examples: - >>> # Running the migration - >>> upgrade() # doctest: +SKIP - Fresh database detected. Skipping migration. - >>> # Or for existing database - >>> upgrade() # doctest: +SKIP - Existing installation detected. Starting data and schema migration... 
- """ - bind = op.get_bind() - sess = Session(bind=bind) - inspector = sa.inspect(bind) - - if not inspector.has_table("gateways"): - print("Fresh database detected. Skipping migration.") - return - - print("Existing installation detected. Starting data and schema migration...") - - # ── STAGE 1: ADD NEW NULLABLE COLUMNS AS PLACEHOLDERS ───────────────── - op.add_column("gateways", sa.Column("slug", sa.String(), nullable=True)) - op.add_column("gateways", sa.Column("id_new", sa.String(36), nullable=True)) - - op.add_column("tools", sa.Column("id_new", sa.String(36), nullable=True)) - op.add_column("tools", sa.Column("original_name", sa.String(), nullable=True)) - op.add_column("tools", sa.Column("original_name_slug", sa.String(), nullable=True)) - op.add_column("tools", sa.Column("name_new", sa.String(), nullable=True)) - op.add_column("tools", sa.Column("gateway_id_new", sa.String(36), nullable=True)) - - op.add_column("resources", sa.Column("gateway_id_new", sa.String(36), nullable=True)) - op.add_column("prompts", sa.Column("gateway_id_new", sa.String(36), nullable=True)) - - op.add_column("servers", sa.Column("id_new", sa.String(36), nullable=True)) - - op.add_column("server_tool_association", sa.Column("server_id_new", sa.String(36), nullable=True)) - op.add_column("server_tool_association", sa.Column("tool_id_new", sa.String(36), nullable=True)) - - op.add_column("tool_metrics", sa.Column("tool_id_new", sa.String(36), nullable=True)) - op.add_column("server_metrics", sa.Column("server_id_new", sa.String(36), nullable=True)) - op.add_column("server_resource_association", sa.Column("server_id_new", sa.String(36), nullable=True)) - op.add_column("server_prompt_association", sa.Column("server_id_new", sa.String(36), nullable=True)) - - # ── STAGE 2: POPULATE THE NEW COLUMNS (DATA MIGRATION) ─────────────── - gateways = sess.execute(sa.select(sa.text("id, name")).select_from(sa.text("gateways"))).all() - for gid, gname in gateways: - g_uuid = uuid.uuid4().hex - 
sess.execute( - sa.text("UPDATE gateways SET id_new=:u, slug=:s WHERE id=:i"), - {"u": g_uuid, "s": slugify(gname), "i": gid}, - ) - - tools = sess.execute(sa.select(sa.text("id, name, gateway_id")).select_from(sa.text("tools"))).all() - for tid, tname, g_old in tools: - t_uuid = uuid.uuid4().hex - tool_slug = slugify(tname) - sess.execute( - sa.text( - """ - UPDATE tools - SET id_new=:u, - original_name=:on, - original_name_slug=:ons, - name_new = CASE - WHEN :g IS NOT NULL THEN (SELECT slug FROM gateways WHERE id = :g) || :sep || :ons - ELSE :ons - END, - gateway_id_new=(SELECT id_new FROM gateways WHERE id=:g) - WHERE id=:i - """ - ), - { - "u": t_uuid, - "on": tname, - "ons": tool_slug, - "sep": settings.gateway_tool_name_separator, - "g": g_old, - "i": tid, - }, - ) - - servers = sess.execute(sa.select(sa.text("id")).select_from(sa.text("servers"))).all() - for (sid,) in servers: - sess.execute( - sa.text("UPDATE servers SET id_new=:u WHERE id=:i"), - {"u": uuid.uuid4().hex, "i": sid}, - ) - - # Populate all dependent tables - resources = sess.execute(sa.select(sa.text("id, gateway_id")).select_from(sa.text("resources"))).all() - for rid, g_old in resources: - sess.execute(sa.text("UPDATE resources SET gateway_id_new=(SELECT id_new FROM gateways WHERE id=:g) WHERE id=:i"), {"g": g_old, "i": rid}) - prompts = sess.execute(sa.select(sa.text("id, gateway_id")).select_from(sa.text("prompts"))).all() - for pid, g_old in prompts: - sess.execute(sa.text("UPDATE prompts SET gateway_id_new=(SELECT id_new FROM gateways WHERE id=:g) WHERE id=:i"), {"g": g_old, "i": pid}) - sta = sess.execute(sa.select(sa.text("server_id, tool_id")).select_from(sa.text("server_tool_association"))).all() - for s_old, t_old in sta: - sess.execute( - sa.text("UPDATE server_tool_association SET server_id_new=(SELECT id_new FROM servers WHERE id=:s), tool_id_new=(SELECT id_new FROM tools WHERE id=:t) WHERE server_id=:s AND tool_id=:t"), - {"s": s_old, "t": t_old}, - ) - tool_metrics = 
sess.execute(sa.select(sa.text("id, tool_id")).select_from(sa.text("tool_metrics"))).all() - for tmid, t_old in tool_metrics: - sess.execute(sa.text("UPDATE tool_metrics SET tool_id_new=(SELECT id_new FROM tools WHERE id=:t) WHERE id=:i"), {"t": t_old, "i": tmid}) - server_metrics = sess.execute(sa.select(sa.text("id, server_id")).select_from(sa.text("server_metrics"))).all() - for smid, s_old in server_metrics: - sess.execute(sa.text("UPDATE server_metrics SET server_id_new=(SELECT id_new FROM servers WHERE id=:s) WHERE id=:i"), {"s": s_old, "i": smid}) - server_resource_assoc = sess.execute(sa.select(sa.text("server_id, resource_id")).select_from(sa.text("server_resource_association"))).all() - for s_old, r_id in server_resource_assoc: - sess.execute(sa.text("UPDATE server_resource_association SET server_id_new=(SELECT id_new FROM servers WHERE id=:s) WHERE server_id=:s AND resource_id=:r"), {"s": s_old, "r": r_id}) - server_prompt_assoc = sess.execute(sa.select(sa.text("server_id, prompt_id")).select_from(sa.text("server_prompt_association"))).all() - for s_old, p_id in server_prompt_assoc: - sess.execute(sa.text("UPDATE server_prompt_association SET server_id_new=(SELECT id_new FROM servers WHERE id=:s) WHERE server_id=:s AND prompt_id=:p"), {"s": s_old, "p": p_id}) - - sess.commit() - - # ── STAGE 3: FINALIZE SCHEMA (CORRECTED ORDER) ─────────────────────── - # First, rebuild all tables that depend on `servers` and `gateways`. - # This implicitly drops their old foreign key constraints. 
- with op.batch_alter_table("server_tool_association") as batch_op: - batch_op.drop_column("server_id") - batch_op.drop_column("tool_id") - batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) - batch_op.alter_column("tool_id_new", new_column_name="tool_id", nullable=False) - batch_op.create_primary_key("pk_server_tool_association", ["server_id", "tool_id"]) - - with op.batch_alter_table("server_resource_association") as batch_op: - batch_op.drop_column("server_id") - batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) - - with op.batch_alter_table("server_prompt_association") as batch_op: - batch_op.drop_column("server_id") - batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) - - with op.batch_alter_table("server_metrics") as batch_op: - batch_op.drop_column("server_id") - batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) - - with op.batch_alter_table("tool_metrics") as batch_op: - batch_op.drop_column("tool_id") - batch_op.alter_column("tool_id_new", new_column_name="tool_id", nullable=False) - - with op.batch_alter_table("tools") as batch_op: - batch_op.drop_column("id") - batch_op.alter_column("id_new", new_column_name="id", nullable=False) - batch_op.create_primary_key("pk_tools", ["id"]) - batch_op.drop_column("gateway_id") - batch_op.alter_column("gateway_id_new", new_column_name="gateway_id", nullable=True) - batch_op.drop_column("name") - batch_op.alter_column("name_new", new_column_name="name", nullable=True) - batch_op.alter_column("original_name", nullable=False) - batch_op.alter_column("original_name_slug", nullable=False) - batch_op.create_unique_constraint("uq_tools_name", ["name"]) - batch_op.create_unique_constraint("uq_gateway_id__original_name", ["gateway_id", "original_name"]) - - with op.batch_alter_table("resources") as batch_op: - batch_op.drop_column("gateway_id") - batch_op.alter_column("gateway_id_new", 
new_column_name="gateway_id", nullable=True) - - with op.batch_alter_table("prompts") as batch_op: - batch_op.drop_column("gateway_id") - batch_op.alter_column("gateway_id_new", new_column_name="gateway_id", nullable=True) - - # Second, now that no tables point to their old IDs, rebuild `gateways` and `servers`. - with op.batch_alter_table("gateways") as batch_op: - batch_op.drop_column("id") - batch_op.alter_column("id_new", new_column_name="id", nullable=False) - batch_op.create_primary_key("pk_gateways", ["id"]) - batch_op.alter_column("slug", nullable=False) - batch_op.create_unique_constraint("uq_gateways_slug", ["slug"]) - batch_op.create_unique_constraint("uq_gateways_url", ["url"]) - - with op.batch_alter_table("servers") as batch_op: - batch_op.drop_column("id") - batch_op.alter_column("id_new", new_column_name="id", nullable=False) - batch_op.create_primary_key("pk_servers", ["id"]) - - # Finally, recreate all the foreign key constraints in batch mode for SQLite compatibility. - # The redundant `source_table` argument has been removed from each call. 
- with op.batch_alter_table("tools") as batch_op: - batch_op.create_foreign_key("fk_tools_gateway_id", "gateways", ["gateway_id"], ["id"]) - with op.batch_alter_table("resources") as batch_op: - batch_op.create_foreign_key("fk_resources_gateway_id", "gateways", ["gateway_id"], ["id"]) - with op.batch_alter_table("prompts") as batch_op: - batch_op.create_foreign_key("fk_prompts_gateway_id", "gateways", ["gateway_id"], ["id"]) - with op.batch_alter_table("server_tool_association") as batch_op: - batch_op.create_foreign_key("fk_server_tool_association_servers", "servers", ["server_id"], ["id"]) - batch_op.create_foreign_key("fk_server_tool_association_tools", "tools", ["tool_id"], ["id"]) - with op.batch_alter_table("tool_metrics") as batch_op: - batch_op.create_foreign_key("fk_tool_metrics_tool_id", "tools", ["tool_id"], ["id"]) - with op.batch_alter_table("server_metrics") as batch_op: - batch_op.create_foreign_key("fk_server_metrics_server_id", "servers", ["server_id"], ["id"]) - with op.batch_alter_table("server_resource_association") as batch_op: - batch_op.create_foreign_key("fk_server_resource_association_server_id", "servers", ["server_id"], ["id"]) - with op.batch_alter_table("server_prompt_association") as batch_op: - batch_op.create_foreign_key("fk_server_prompt_association_server_id", "servers", ["server_id"], ["id"]) - - -# def upgrade() -> None: -# bind = op.get_bind() -# sess = Session(bind=bind) -# inspector = sa.inspect(bind) - -# if not inspector.has_table("gateways"): -# print("Fresh database detected. Skipping migration.") -# return - -# print("Existing installation detected. 
Starting data and schema migration...") - -# # ── STAGE 1: ADD NEW NULLABLE COLUMNS AS PLACEHOLDERS ───────────────── -# op.add_column("gateways", sa.Column("slug", sa.String(), nullable=True)) -# op.add_column("gateways", sa.Column("id_new", sa.String(36), nullable=True)) - -# op.add_column("tools", sa.Column("id_new", sa.String(36), nullable=True)) -# op.add_column("tools", sa.Column("original_name", sa.String(), nullable=True)) -# op.add_column("tools", sa.Column("original_name_slug", sa.String(), nullable=True)) -# op.add_column("tools", sa.Column("name_new", sa.String(), nullable=True)) -# op.add_column("tools", sa.Column("gateway_id_new", sa.String(36), nullable=True)) - -# op.add_column("resources", sa.Column("gateway_id_new", sa.String(36), nullable=True)) -# op.add_column("prompts", sa.Column("gateway_id_new", sa.String(36), nullable=True)) - -# op.add_column("servers", sa.Column("id_new", sa.String(36), nullable=True)) - -# op.add_column("server_tool_association", sa.Column("server_id_new", sa.String(36), nullable=True)) -# op.add_column("server_tool_association", sa.Column("tool_id_new", sa.String(36), nullable=True)) - -# op.add_column("tool_metrics", sa.Column("tool_id_new", sa.String(36), nullable=True)) - -# # Add columns for the new server dependencies -# op.add_column("server_metrics", sa.Column("server_id_new", sa.String(36), nullable=True)) -# op.add_column("server_resource_association", sa.Column("server_id_new", sa.String(36), nullable=True)) -# op.add_column("server_prompt_association", sa.Column("server_id_new", sa.String(36), nullable=True)) - - -# # ── STAGE 2: POPULATE THE NEW COLUMNS (DATA MIGRATION) ─────────────── -# gateways = sess.execute(sa.select(sa.text("id, name")).select_from(sa.text("gateways"))).all() -# for gid, gname in gateways: -# g_uuid = uuid.uuid4().hex -# sess.execute( -# sa.text("UPDATE gateways SET id_new=:u, slug=:s WHERE id=:i"), -# {"u": g_uuid, "s": slugify(gname), "i": gid}, -# ) - -# tools = sess.execute( -# 
sa.select(sa.text("id, name, gateway_id")).select_from(sa.text("tools")) -# ).all() -# for tid, tname, g_old in tools: -# t_uuid = uuid.uuid4().hex -# tool_slug = slugify(tname) -# sess.execute( -# sa.text( -# """ -# UPDATE tools -# SET id_new=:u, -# original_name=:on, -# original_name_slug=:ons, -# name_new = CASE -# WHEN :g IS NOT NULL THEN (SELECT slug FROM gateways WHERE id = :g) || :sep || :ons -# ELSE :ons -# END, -# gateway_id_new=(SELECT id_new FROM gateways WHERE id=:g) -# WHERE id=:i -# """ -# ), -# { -# "u": t_uuid, "on": tname, "ons": tool_slug, -# "sep": settings.gateway_tool_name_separator, "g": g_old, "i": tid, -# }, -# ) - -# servers = sess.execute(sa.select(sa.text("id")).select_from(sa.text("servers"))).all() -# for (sid,) in servers: -# sess.execute( -# sa.text("UPDATE servers SET id_new=:u WHERE id=:i"), -# {"u": uuid.uuid4().hex, "i": sid}, -# ) - -# # Populate all dependent tables -# resources = sess.execute(sa.select(sa.text("id, gateway_id")).select_from(sa.text("resources"))).all() -# for rid, g_old in resources: -# sess.execute(sa.text("UPDATE resources SET gateway_id_new=(SELECT id_new FROM gateways WHERE id=:g) WHERE id=:i"), {"g": g_old, "i": rid}) -# prompts = sess.execute(sa.select(sa.text("id, gateway_id")).select_from(sa.text("prompts"))).all() -# for pid, g_old in prompts: -# sess.execute(sa.text("UPDATE prompts SET gateway_id_new=(SELECT id_new FROM gateways WHERE id=:g) WHERE id=:i"), {"g": g_old, "i": pid}) -# sta = sess.execute(sa.select(sa.text("server_id, tool_id")).select_from(sa.text("server_tool_association"))).all() -# for s_old, t_old in sta: -# sess.execute(sa.text("UPDATE server_tool_association SET server_id_new=(SELECT id_new FROM servers WHERE id=:s), tool_id_new=(SELECT id_new FROM tools WHERE id=:t) WHERE server_id=:s AND tool_id=:t"), {"s": s_old, "t": t_old}) -# tool_metrics = sess.execute(sa.select(sa.text("id, tool_id")).select_from(sa.text("tool_metrics"))).all() -# for tmid, t_old in tool_metrics: -# 
sess.execute(sa.text("UPDATE tool_metrics SET tool_id_new=(SELECT id_new FROM tools WHERE id=:t) WHERE id=:i"), {"t": t_old, "i": tmid}) -# server_metrics = sess.execute(sa.select(sa.text("id, server_id")).select_from(sa.text("server_metrics"))).all() -# for smid, s_old in server_metrics: -# sess.execute(sa.text("UPDATE server_metrics SET server_id_new=(SELECT id_new FROM servers WHERE id=:s) WHERE id=:i"), {"s": s_old, "i": smid}) -# server_resource_assoc = sess.execute(sa.select(sa.text("server_id, resource_id")).select_from(sa.text("server_resource_association"))).all() -# for s_old, r_id in server_resource_assoc: -# sess.execute(sa.text("UPDATE server_resource_association SET server_id_new=(SELECT id_new FROM servers WHERE id=:s) WHERE server_id=:s AND resource_id=:r"), {"s": s_old, "r": r_id}) -# server_prompt_assoc = sess.execute(sa.select(sa.text("server_id, prompt_id")).select_from(sa.text("server_prompt_association"))).all() -# for s_old, p_id in server_prompt_assoc: -# sess.execute(sa.text("UPDATE server_prompt_association SET server_id_new=(SELECT id_new FROM servers WHERE id=:s) WHERE server_id=:s AND prompt_id=:p"), {"s": s_old, "p": p_id}) - -# sess.commit() - -# # ── STAGE 3: FINALIZE SCHEMA (CORRECTED ORDER) ─────────────────────── -# with op.batch_alter_table("server_tool_association") as batch_op: -# batch_op.drop_column("server_id") -# batch_op.drop_column("tool_id") -# batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) -# batch_op.alter_column("tool_id_new", new_column_name="tool_id", nullable=False) -# batch_op.create_primary_key("pk_server_tool_association", ["server_id", "tool_id"]) - -# with op.batch_alter_table("server_resource_association") as batch_op: -# batch_op.drop_column("server_id") -# batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) - -# with op.batch_alter_table("server_prompt_association") as batch_op: -# batch_op.drop_column("server_id") -# 
batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) - -# with op.batch_alter_table("server_metrics") as batch_op: -# batch_op.drop_column("server_id") -# batch_op.alter_column("server_id_new", new_column_name="server_id", nullable=False) - -# with op.batch_alter_table("tool_metrics") as batch_op: -# batch_op.drop_column("tool_id") -# batch_op.alter_column("tool_id_new", new_column_name="tool_id", nullable=False) - -# with op.batch_alter_table("tools") as batch_op: -# batch_op.drop_column("id") -# batch_op.alter_column("id_new", new_column_name="id", nullable=False) -# batch_op.create_primary_key("pk_tools", ["id"]) -# batch_op.drop_column("gateway_id") -# batch_op.alter_column("gateway_id_new", new_column_name="gateway_id", nullable=True) -# batch_op.drop_column("name") -# batch_op.alter_column("name_new", new_column_name="name", nullable=False) -# batch_op.alter_column("original_name", nullable=False) -# batch_op.alter_column("original_name_slug", nullable=False) -# batch_op.create_unique_constraint("uq_tools_name", ["name"]) -# batch_op.create_unique_constraint("uq_gateway_id__original_name", ["gateway_id", "original_name"]) - -# with op.batch_alter_table("resources") as batch_op: -# batch_op.drop_column("gateway_id") -# batch_op.alter_column("gateway_id_new", new_column_name="gateway_id", nullable=True) - -# with op.batch_alter_table("prompts") as batch_op: -# batch_op.drop_column("gateway_id") -# batch_op.alter_column("gateway_id_new", new_column_name="gateway_id", nullable=True) - -# with op.batch_alter_table("gateways") as batch_op: -# batch_op.drop_column("id") -# batch_op.alter_column("id_new", new_column_name="id", nullable=False) -# batch_op.create_primary_key("pk_gateways", ["id"]) -# batch_op.alter_column("slug", nullable=False) -# batch_op.create_unique_constraint("uq_gateways_slug", ["slug"]) -# batch_op.create_unique_constraint("uq_gateways_url", ["url"]) - -# with op.batch_alter_table("servers") as batch_op: -# 
batch_op.drop_column("id") -# batch_op.alter_column("id_new", new_column_name="id", nullable=False) -# batch_op.create_primary_key("pk_servers", ["id"]) - -# # Finally, recreate all the foreign key constraints -# op.create_foreign_key("fk_tools_gateway_id", "tools", "gateways", ["gateway_id"], ["id"]) -# op.create_foreign_key("fk_resources_gateway_id", "resources", "gateways", ["gateway_id"], ["id"]) -# op.create_foreign_key("fk_prompts_gateway_id", "prompts", "gateways", ["gateway_id"], ["id"]) -# op.create_foreign_key("fk_server_tool_association_servers", "server_tool_association", "servers", ["server_id"], ["id"]) -# op.create_foreign_key("fk_server_tool_association_tools", "server_tool_association", "tools", ["tool_id"], ["id"]) -# op.create_foreign_key("fk_tool_metrics_tool_id", "tool_metrics", "tools", ["tool_id"], ["id"]) -# op.create_foreign_key("fk_server_metrics_server_id", "server_metrics", "servers", ["server_id"], ["id"]) -# op.create_foreign_key("fk_server_resource_association_server_id", "server_resource_association", "servers", ["server_id"], ["id"]) -# op.create_foreign_key("fk_server_prompt_association_server_id", "server_prompt_association", "servers", ["server_id"], ["id"]) - - -def downgrade() -> None: - """Revert database schema from UUID primary keys back to integers. 
- - This downgrade reverses the UUID migration but with significant limitations: - - Schema structure is restored but data is NOT preserved - - All UUID values and slug fields are lost - - Foreign key relationships are broken (columns will be NULL) - - Original integer IDs cannot be recovered - - The downgrade operates in reverse order of the upgrade: - - Stage 1 - Revert schema changes: - - Drops UUID-based constraints and keys - - Renames UUID columns back to temporary names - - Re-adds integer columns (empty/NULL) - - Stage 2 - Data migration (skipped): - - Original integer IDs cannot be restored from UUIDs - - Relationships cannot be reconstructed - - Stage 3 - Remove temporary columns: - - Drops all UUID and slug columns - - Leaves database with original schema but no data - - Warning: - This downgrade is destructive and should only be used if you need - to revert the schema structure. All data in affected tables will - need to be manually restored from backups. - - Examples: - >>> # Running the downgrade - >>> downgrade() # doctest: +SKIP - # Schema reverted but data is lost - """ - # ── STAGE 1 (REVERSE): Revert Schema to original state ───────────────── - # This reverses the operations from STAGE 3 of the upgrade. - # Data from the new columns will be lost, which is expected. 
- - with op.batch_alter_table("server_tool_association") as batch_op: - # Drop new constraints - batch_op.drop_constraint("fk_server_tool_association_tools", type_="foreignkey") - batch_op.drop_constraint("fk_server_tool_association_servers", type_="foreignkey") - batch_op.drop_constraint("pk_server_tool_association", type_="primarykey") - # Rename final columns back to temporary names - batch_op.alter_column("server_id", new_column_name="server_id_new") - batch_op.alter_column("tool_id", new_column_name="tool_id_new") - # Add back old integer columns (data is not restored) - batch_op.add_column(sa.Column("server_id", sa.Integer(), nullable=True)) - batch_op.add_column(sa.Column("tool_id", sa.Integer(), nullable=True)) - - with op.batch_alter_table("tools") as batch_op: - # Drop new constraints - batch_op.drop_constraint("fk_tools_gateway_id", type_="foreignkey") - batch_op.drop_constraint("uq_gateway_id__original_name", type_="unique") - batch_op.drop_constraint("uq_tools_name", type_="unique") - batch_op.drop_constraint("pk_tools", type_="primarykey") - # Rename final columns back to temporary names - batch_op.alter_column("id", new_column_name="id_new") - batch_op.alter_column("gateway_id", new_column_name="gateway_id_new") - batch_op.alter_column("name", new_column_name="name_new") - # Add back old columns - batch_op.add_column(sa.Column("id", sa.Integer(), nullable=True)) - batch_op.add_column(sa.Column("gateway_id", sa.Integer(), nullable=True)) - batch_op.add_column(sa.Column("name", sa.String(), nullable=True)) - - with op.batch_alter_table("servers") as batch_op: - batch_op.drop_constraint("pk_servers", type_="primarykey") - batch_op.alter_column("id", new_column_name="id_new") - batch_op.add_column(sa.Column("id", sa.Integer(), nullable=True)) - - with op.batch_alter_table("gateways") as batch_op: - batch_op.drop_constraint("uq_gateways_url", type_="unique") - batch_op.drop_constraint("uq_gateways_slug", type_="unique") - 
batch_op.drop_constraint("pk_gateways", type_="primarykey") - batch_op.alter_column("id", new_column_name="id_new") - batch_op.add_column(sa.Column("id", sa.Integer(), nullable=True)) - - # ── STAGE 2 (REVERSE): Reverse Data Migration (No-Op for Schema) ────── - # Reversing the data population (e.g., creating integer PKs from UUIDs) - # is a complex, stateful operation and is omitted here. At this point, - # the original columns exist but are empty (NULL). - - # ── STAGE 3 (REVERSE): Drop the temporary/new columns ──────────────── - # This reverses the operations from STAGE 1 of the upgrade. - op.drop_column("server_tool_association", "tool_id_new") - op.drop_column("server_tool_association", "server_id_new") - op.drop_column("servers", "id_new") - op.drop_column("tools", "gateway_id_new") - op.drop_column("tools", "name_new") - op.drop_column("tools", "original_name_slug") - op.drop_column("tools", "original_name") - op.drop_column("tools", "id_new") - op.drop_column("gateways", "id_new") - op.drop_column("gateways", "slug") \ No newline at end of file diff --git a/mcpgateway/admin.py b/mcpgateway/admin.py index 0d88a4a1..9f88f304 100644 --- a/mcpgateway/admin.py +++ b/mcpgateway/admin.py @@ -4014,7 +4014,10 @@ async def admin_delete_root(uri: str, request: Request, user: str = Depends(requ @admin_router.get("/metrics") -async def get_aggregated_metrics(db: Session = Depends(get_db)) -> Dict[str, Any]: +async def get_aggregated_metrics( + db: Session = Depends(get_db), + _user: str = Depends(require_auth), +) -> Dict[str, Any]: """Retrieve aggregated metrics and top performers for all entity types. 
This endpoint collects usage metrics and top-performing entities for tools, @@ -4046,7 +4049,6 @@ async def get_aggregated_metrics(db: Session = Depends(get_db)) -> Dict[str, Any "servers": await server_service.get_top_servers(db, limit=5), }, } - } return metrics diff --git a/mcpgateway/alembic/README.md b/mcpgateway/alembic/README.md index 081ee38b..00219a1a 100644 --- a/mcpgateway/alembic/README.md +++ b/mcpgateway/alembic/README.md @@ -169,4 +169,4 @@ make db-history * Official docs: [https://alembic.sqlalchemy.org](https://alembic.sqlalchemy.org) * Autogenerate docs: [https://alembic.sqlalchemy.org/en/latest/autogenerate.html](https://alembic.sqlalchemy.org/en/latest/autogenerate.html) ---- \ No newline at end of file +--- diff --git a/mcpgateway/alembic/script.py.mako b/mcpgateway/alembic/script.py.mako index fbf1fe34..11016301 100644 --- a/mcpgateway/alembic/script.py.mako +++ b/mcpgateway/alembic/script.py.mako @@ -25,4 +25,4 @@ def upgrade() -> None: def downgrade() -> None: """Downgrade schema.""" - ${downgrades if downgrades else "pass"} \ No newline at end of file + ${downgrades if downgrades else "pass"} diff --git a/alembic/versions/e4fc04d1a442_add_annotations_to_tables.py b/mcpgateway/alembic/versions/e4fc04d1a442_add_annotations_to_tables.py similarity index 97% rename from alembic/versions/e4fc04d1a442_add_annotations_to_tables.py rename to mcpgateway/alembic/versions/e4fc04d1a442_add_annotations_to_tables.py index 2ae872d3..8876f3b4 100644 --- a/alembic/versions/e4fc04d1a442_add_annotations_to_tables.py +++ b/mcpgateway/alembic/versions/e4fc04d1a442_add_annotations_to_tables.py @@ -53,4 +53,4 @@ def downgrade() -> None: print("Fresh database detected. 
Skipping migration.") return - op.drop_column("tools", "annotations") \ No newline at end of file + op.drop_column("tools", "annotations") diff --git a/alembic/versions/e75490e949b1_add_improved_status_to_tables.py b/mcpgateway/alembic/versions/e75490e949b1_add_improved_status_to_tables.py similarity index 97% rename from alembic/versions/e75490e949b1_add_improved_status_to_tables.py rename to mcpgateway/alembic/versions/e75490e949b1_add_improved_status_to_tables.py index 06c63ef9..097535b4 100644 --- a/alembic/versions/e75490e949b1_add_improved_status_to_tables.py +++ b/mcpgateway/alembic/versions/e75490e949b1_add_improved_status_to_tables.py @@ -41,4 +41,4 @@ def downgrade(): op.drop_column("tools", "reachable") op.alter_column("gateways", "enabled", new_column_name="is_active") - op.drop_column("gateways", "reachable") \ No newline at end of file + op.drop_column("gateways", "reachable") diff --git a/mcpgateway/migrations/env.py b/mcpgateway/migrations/env.py deleted file mode 100644 index 521caedb..00000000 --- a/mcpgateway/migrations/env.py +++ /dev/null @@ -1,84 +0,0 @@ -"""Alembic environment configuration for database migrations. - -This module sets up the Alembic migration environment, configuring the database connection -and metadata for running migrations in both online and offline modes. -""" -from logging.config import fileConfig - -from sqlalchemy import engine_from_config -from sqlalchemy import pool - -from alembic import context - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. 
-if config.config_file_name is not None: - fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = None - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline() -> None: - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, - target_metadata=target_metadata, - literal_binds=True, - dialect_opts={"paramstyle": "named"}, - ) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online() -> None: - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - connectable = engine_from_config( - config.get_section(config.config_ini_section, {}), - prefix="sqlalchemy.", - poolclass=pool.NullPool, - ) - - with connectable.connect() as connection: - context.configure( - connection=connection, target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - - -if context.is_offline_mode(): - - run_migrations_offline() -else: - run_migrations_online() diff --git a/mcpgateway/schemas.py b/mcpgateway/schemas.py index 6d01eca8..fd4fe45e 100644 --- a/mcpgateway/schemas.py +++ b/mcpgateway/schemas.py @@ -2952,6 +2952,7 @@ class TopPerformer(BaseModelWithConfigDict): success_rate (Optional[float]): Success rate percentage, or None if no metrics. 
last_execution (Optional[datetime]): Timestamp of the last execution, or None if no metrics. """ + id: Union[str, int] = Field(..., description="Entity ID") name: str = Field(..., description="Entity name") execution_count: int = Field(..., description="Number of executions") diff --git a/mcpgateway/services/prompt_service.py b/mcpgateway/services/prompt_service.py index c8ceedfd..ab2eba64 100644 --- a/mcpgateway/services/prompt_service.py +++ b/mcpgateway/services/prompt_service.py @@ -23,7 +23,7 @@ # Third-Party from jinja2 import Environment, meta, select_autoescape -from sqlalchemy import delete, func, not_, select, desc, case +from sqlalchemy import case, delete, desc, func, not_, select from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -142,30 +142,32 @@ async def shutdown(self) -> None: async def get_top_prompts(self, db: Session, limit: int = 5) -> List[TopPerformer]: """Retrieve the top-performing prompts based on execution count. - Queries the database to get prompts with their metrics, ordered by the number of executions - in descending order. Returns a list of TopPerformer objects containing prompt details and - performance metrics. - - Args: - db (Session): Database session for querying prompt metrics. - limit (int, optional): Maximum number of prompts to return. Defaults to 5. - - Returns: - List[TopPerformer]: A list of TopPerformer objects, each containing: - - id: Prompt ID. - - name: Prompt name. - - execution_count: Total number of executions. - - avg_response_time: Average response time in seconds, or None if no metrics. - - success_rate: Success rate percentage, or None if no metrics. - - last_execution: Timestamp of the last execution, or None if no metrics. - """ + Queries the database to get prompts with their metrics, ordered by the number of executions + in descending order. Returns a list of TopPerformer objects containing prompt details and + performance metrics. 
+ + Args: + db (Session): Database session for querying prompt metrics. + limit (int): Maximum number of prompts to return. Defaults to 5. + + Returns: + List[TopPerformer]: A list of TopPerformer objects, each containing: + - id: Prompt ID. + - name: Prompt name. + - execution_count: Total number of executions. + - avg_response_time: Average response time in seconds, or None if no metrics. + - success_rate: Success rate percentage, or None if no metrics. + - last_execution: Timestamp of the last execution, or None if no metrics. + """ results = ( db.query( DbPrompt.id, DbPrompt.name, func.count(PromptMetric.id).label("execution_count"), # pylint: disable=not-callable func.avg(PromptMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable - (func.sum(case((PromptMetric.is_success, 1), else_=0)) / func.count(PromptMetric.id) * 100).label("success_rate"), # pylint: disable=not-callable + case((func.count(PromptMetric.id) > 0, func.sum(case((PromptMetric.is_success, 1), else_=0)) / func.count(PromptMetric.id) * 100), else_=None).label( + "success_rate" + ), # pylint: disable=not-callable func.max(PromptMetric.timestamp).label("last_execution"), # pylint: disable=not-callable ) .outerjoin(PromptMetric) diff --git a/mcpgateway/services/resource_service.py b/mcpgateway/services/resource_service.py index db6ba41e..8ed317e0 100644 --- a/mcpgateway/services/resource_service.py +++ b/mcpgateway/services/resource_service.py @@ -33,7 +33,7 @@ # Third-Party import parse -from sqlalchemy import delete, func, not_, select, desc, case +from sqlalchemy import case, delete, desc, func, not_, select from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -43,14 +43,7 @@ from mcpgateway.db import ResourceSubscription as DbSubscription from mcpgateway.db import server_resource_association from mcpgateway.models import ResourceContent, ResourceTemplate, TextContent -from mcpgateway.schemas import ( - ResourceCreate, - ResourceMetrics, - 
ResourceRead, - ResourceSubscription, - ResourceUpdate, - TopPerformer -) +from mcpgateway.schemas import ResourceCreate, ResourceMetrics, ResourceRead, ResourceSubscription, ResourceUpdate, TopPerformer from mcpgateway.services.logging_service import LoggingService # Initialize logging service first @@ -122,30 +115,32 @@ async def shutdown(self) -> None: async def get_top_resources(self, db: Session, limit: int = 5) -> List[TopPerformer]: """Retrieve the top-performing resources based on execution count. - Queries the database to get resources with their metrics, ordered by the number of executions - in descending order. Uses the resource URI as the name field for TopPerformer objects. - Returns a list of TopPerformer objects containing resource details and performance metrics. - - Args: - db (Session): Database session for querying resource metrics. - limit (int, optional): Maximum number of resources to return. Defaults to 5. - - Returns: - List[TopPerformer]: A list of TopPerformer objects, each containing: - - id: Resource ID. - - name: Resource URI (used as the name field). - - execution_count: Total number of executions. - - avg_response_time: Average response time in seconds, or None if no metrics. - - success_rate: Success rate percentage, or None if no metrics. - - last_execution: Timestamp of the last execution, or None if no metrics. - """ + Queries the database to get resources with their metrics, ordered by the number of executions + in descending order. Uses the resource URI as the name field for TopPerformer objects. + Returns a list of TopPerformer objects containing resource details and performance metrics. + + Args: + db (Session): Database session for querying resource metrics. + limit (int): Maximum number of resources to return. Defaults to 5. + + Returns: + List[TopPerformer]: A list of TopPerformer objects, each containing: + - id: Resource ID. + - name: Resource URI (used as the name field). + - execution_count: Total number of executions. 
+ - avg_response_time: Average response time in seconds, or None if no metrics. + - success_rate: Success rate percentage, or None if no metrics. + - last_execution: Timestamp of the last execution, or None if no metrics. + """ results = ( db.query( DbResource.id, DbResource.uri.label("name"), # Using URI as the name field for TopPerformer func.count(ResourceMetric.id).label("execution_count"), # pylint: disable=not-callable func.avg(ResourceMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable - (func.sum(case((ResourceMetric.is_success, 1), else_=0)) / func.count(ResourceMetric.id) * 100).label("success_rate"), # pylint: disable=not-callable + case((func.count(ResourceMetric.id) > 0, func.sum(case((ResourceMetric.is_success, 1), else_=0)) / func.count(ResourceMetric.id) * 100), else_=None).label( + "success_rate" + ), # pylint: disable=not-callable func.max(ResourceMetric.timestamp).label("last_execution"), # pylint: disable=not-callable ) .outerjoin(ResourceMetric) diff --git a/mcpgateway/services/server_service.py b/mcpgateway/services/server_service.py index af536c61..308f8f82 100644 --- a/mcpgateway/services/server_service.py +++ b/mcpgateway/services/server_service.py @@ -19,7 +19,7 @@ # Third-Party import httpx -from sqlalchemy import delete, func, not_, select, case, desc +from sqlalchemy import case, delete, desc, func, not_, select from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -130,30 +130,32 @@ async def shutdown(self) -> None: async def get_top_servers(self, db: Session, limit: int = 5) -> List[TopPerformer]: """Retrieve the top-performing servers based on execution count. - Queries the database to get servers with their metrics, ordered by the number of executions - in descending order. Returns a list of TopPerformer objects containing server details and - performance metrics. - - Args: - db (Session): Database session for querying server metrics. 
- limit (int, optional): Maximum number of servers to return. Defaults to 5. - - Returns: - List[TopPerformer]: A list of TopPerformer objects, each containing: - - id: Server ID. - - name: Server name. - - execution_count: Total number of executions. - - avg_response_time: Average response time in seconds, or None if no metrics. - - success_rate: Success rate percentage, or None if no metrics. - - last_execution: Timestamp of the last execution, or None if no metrics. - """ + Queries the database to get servers with their metrics, ordered by the number of executions + in descending order. Returns a list of TopPerformer objects containing server details and + performance metrics. + + Args: + db (Session): Database session for querying server metrics. + limit (int): Maximum number of servers to return. Defaults to 5. + + Returns: + List[TopPerformer]: A list of TopPerformer objects, each containing: + - id: Server ID. + - name: Server name. + - execution_count: Total number of executions. + - avg_response_time: Average response time in seconds, or None if no metrics. + - success_rate: Success rate percentage, or None if no metrics. + - last_execution: Timestamp of the last execution, or None if no metrics. 
+ """ results = ( db.query( DbServer.id, DbServer.name, func.count(ServerMetric.id).label("execution_count"), # pylint: disable=not-callable func.avg(ServerMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable - (func.sum(case((ServerMetric.is_success, 1), else_=0)) / func.count(ServerMetric.id) * 100).label("success_rate"), # pylint: disable=not-callable + case((func.count(ServerMetric.id) > 0, func.sum(case((ServerMetric.is_success, 1), else_=0)) / func.count(ServerMetric.id) * 100), else_=None).label( + "success_rate" + ), # pylint: disable=not-callable func.max(ServerMetric.timestamp).label("last_execution"), # pylint: disable=not-callable ) .outerjoin(ServerMetric) diff --git a/mcpgateway/services/tool_service.py b/mcpgateway/services/tool_service.py index aef8fd25..bc4980f2 100644 --- a/mcpgateway/services/tool_service.py +++ b/mcpgateway/services/tool_service.py @@ -28,7 +28,7 @@ from mcp import ClientSession from mcp.client.sse import sse_client from mcp.client.streamable_http import streamablehttp_client -from sqlalchemy import delete, func, not_, select, case, desc +from sqlalchemy import case, delete, desc, func, not_, select from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -41,12 +41,7 @@ from mcpgateway.models import TextContent, ToolResult from mcpgateway.plugins.framework.manager import PluginManager from mcpgateway.plugins.framework.plugin_types import GlobalContext, PluginViolationError, ToolPostInvokePayload, ToolPreInvokePayload -from mcpgateway.schemas import ( - ToolCreate, - ToolRead, - ToolUpdate, - TopPerformer -) +from mcpgateway.schemas import ToolCreate, ToolRead, ToolUpdate, TopPerformer from mcpgateway.services.logging_service import LoggingService from mcpgateway.utils.create_slug import slugify from mcpgateway.utils.passthrough_headers import get_passthrough_headers @@ -197,30 +192,32 @@ async def shutdown(self) -> None: async def get_top_tools(self, db: Session, limit: int = 
5) -> List[TopPerformer]: """Retrieve the top-performing tools based on execution count. - Queries the database to get tools with their metrics, ordered by the number of executions - in descending order. Returns a list of TopPerformer objects containing tool details and - performance metrics. - - Args: - db (Session): Database session for querying tool metrics. - limit (int, optional): Maximum number of tools to return. Defaults to 5. - - Returns: - List[TopPerformer]: A list of TopPerformer objects, each containing: - - id: Tool ID. - - name: Tool name. - - execution_count: Total number of executions. - - avg_response_time: Average response time in seconds, or None if no metrics. - - success_rate: Success rate percentage, or None if no metrics. - - last_execution: Timestamp of the last execution, or None if no metrics. - """ + Queries the database to get tools with their metrics, ordered by the number of executions + in descending order. Returns a list of TopPerformer objects containing tool details and + performance metrics. + + Args: + db (Session): Database session for querying tool metrics. + limit (int): Maximum number of tools to return. Defaults to 5. + + Returns: + List[TopPerformer]: A list of TopPerformer objects, each containing: + - id: Tool ID. + - name: Tool name. + - execution_count: Total number of executions. + - avg_response_time: Average response time in seconds, or None if no metrics. + - success_rate: Success rate percentage, or None if no metrics. + - last_execution: Timestamp of the last execution, or None if no metrics. 
+ """ results = ( db.query( DbTool.id, DbTool.name, func.count(ToolMetric.id).label("execution_count"), # pylint: disable=not-callable func.avg(ToolMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable - (func.sum(case((ToolMetric.is_success, 1), else_=0)) / func.count(ToolMetric.id) * 100).label("success_rate"), # pylint: disable=not-callable + case((func.count(ToolMetric.id) > 0, func.sum(case((ToolMetric.is_success, 1), else_=0)) / func.count(ToolMetric.id) * 100), else_=None).label( + "success_rate" + ), # pylint: disable=not-callable func.max(ToolMetric.timestamp).label("last_execution"), # pylint: disable=not-callable ) .outerjoin(ToolMetric) diff --git a/tests/e2e/test_main_apis.py b/tests/e2e/test_main_apis.py index 3ff0e72b..21e766e4 100644 --- a/tests/e2e/test_main_apis.py +++ b/tests/e2e/test_main_apis.py @@ -742,8 +742,8 @@ async def test_tool_name_conflict(self, client: AsyncClient, mock_auth): # Try to create duplicate - might succeed with different ID response = await client.post("/tools", json=tool_data, headers=TEST_AUTH_HEADER) - # Accept 409 Conflict as valid for duplicate - assert response.status_code in [200, 409] + # Accept 400, 409, or 200 as valid responses for duplicate + assert response.status_code in [200, 400, 409] if response.status_code == 400: assert "already exists" in response.json()["detail"] diff --git a/tests/unit/mcpgateway/services/test_tool_service.py b/tests/unit/mcpgateway/services/test_tool_service.py index 511558f3..1e941eb2 100644 --- a/tests/unit/mcpgateway/services/test_tool_service.py +++ b/tests/unit/mcpgateway/services/test_tool_service.py @@ -324,13 +324,13 @@ async def test_register_tool_name_conflict(self, tool_service, mock_tool, test_d request_type="POST", ) - # Should raise IntegrityError due to UNIQUE constraint failure + # Should raise ToolError due to UNIQUE constraint failure (wrapped IntegrityError) test_db.commit = Mock(side_effect=IntegrityError("UNIQUE constraint failed: 
tools.name", None, None)) - with pytest.raises(IntegrityError) as exc_info: + with pytest.raises(ToolError) as exc_info: await tool_service.register_tool(test_db, tool_create) - # Check the error message for UNIQUE constraint failure - assert "UNIQUE constraint failed: tools.name" in str(exc_info.value) + # Check the error message for tool name conflict + assert "Tool already exists: test_tool" in str(exc_info.value) @pytest.mark.asyncio async def test_register_inactive_tool_name_conflict(self, tool_service, mock_tool, test_db): @@ -350,13 +350,13 @@ async def test_register_inactive_tool_name_conflict(self, tool_service, mock_too request_type="POST", ) - # Should raise IntegrityError due to UNIQUE constraint failure + # Should raise ToolError due to UNIQUE constraint failure (wrapped IntegrityError) test_db.commit = Mock(side_effect=IntegrityError("UNIQUE constraint failed: tools.name", None, None)) - with pytest.raises(IntegrityError) as exc_info: + with pytest.raises(ToolError) as exc_info: await tool_service.register_tool(test_db, tool_create) - # Check the error message for UNIQUE constraint failure - assert "UNIQUE constraint failed: tools.name" in str(exc_info.value) + # Check the error message for tool name conflict + assert "Tool already exists: test_tool" in str(exc_info.value) @pytest.mark.asyncio async def test_register_tool_db_integrity_error(self, tool_service, test_db): @@ -378,13 +378,13 @@ async def test_register_tool_db_integrity_error(self, tool_service, test_db): request_type="POST", ) - # Should raise IntegrityError - with pytest.raises(IntegrityError) as exc_info: + # Should raise ToolError (wrapped IntegrityError) + with pytest.raises(ToolError) as exc_info: await tool_service.register_tool(test_db, tool_create) - # After exception, rollback should be called - test_db.rollback.assert_called_once() - assert "orig" in str(exc_info.value) + # Verify rollback was called + test_db.rollback.assert_called_once() + assert "Tool already exists: 
test_tool" in str(exc_info.value) @pytest.mark.asyncio async def test_list_tools(self, tool_service, mock_tool, test_db): diff --git a/tests/unit/mcpgateway/test_admin.py b/tests/unit/mcpgateway/test_admin.py index 9559c86e..af156548 100644 --- a/tests/unit/mcpgateway/test_admin.py +++ b/tests/unit/mcpgateway/test_admin.py @@ -998,7 +998,11 @@ class TestAdminMetricsRoutes: @patch.object(ResourceService, "aggregate_metrics", new_callable=AsyncMock) @patch.object(ServerService, "aggregate_metrics", new_callable=AsyncMock) @patch.object(PromptService, "aggregate_metrics", new_callable=AsyncMock) - async def test_admin_get_metrics_with_nulls(self, mock_prompt_metrics, mock_server_metrics, mock_resource_metrics, mock_tool_metrics, mock_db): + @patch.object(ToolService, "get_top_tools", new_callable=AsyncMock) + @patch.object(ResourceService, "get_top_resources", new_callable=AsyncMock) + @patch.object(ServerService, "get_top_servers", new_callable=AsyncMock) + @patch.object(PromptService, "get_top_prompts", new_callable=AsyncMock) + async def test_admin_get_metrics_with_nulls(self, mock_prompt_top, mock_server_top, mock_resource_top, mock_tool_top, mock_prompt_metrics, mock_server_metrics, mock_resource_metrics, mock_tool_metrics, mock_db): """Test getting metrics with null values.""" # Some services return metrics with null values mock_tool_metrics.return_value = ToolMetrics( @@ -1026,15 +1030,23 @@ async def test_admin_get_metrics_with_nulls(self, mock_prompt_metrics, mock_serv mock_server_metrics.return_value = None # No metrics available mock_prompt_metrics.return_value = None - # result = await admin_get_metrics(mock_db, "test-user") - result = await get_aggregated_metrics(mock_db, "test-user") - + # Mock top performers to return empty lists + mock_tool_top.return_value = [] + mock_resource_top.return_value = [] + mock_server_top.return_value = [] + mock_prompt_top.return_value = [] + # result = await admin_get_metrics(mock_db, "test-user") + result = await 
get_aggregated_metrics(mock_db) assert result["tools"].total_executions == 0 assert result["resources"].total_executions == 100 assert result["servers"] is None assert result["prompts"] is None + # Check that topPerformers structure exists + assert "topPerformers" in result + assert result["topPerformers"]["tools"] == [] + assert result["topPerformers"]["resources"] == [] @patch.object(ToolService, "reset_metrics", new_callable=AsyncMock) @patch.object(ResourceService, "reset_metrics", new_callable=AsyncMock) From c8e41e1f636031c0858cbea3b64ea040e471893e Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Sun, 10 Aug 2025 09:13:48 +0100 Subject: [PATCH 05/11] Fix lint issues, rebae Signed-off-by: Mihai Criveti --- .pylintrc | 2 +- mcpgateway/schemas.py | 2 +- mcpgateway/services/logging_service.py | 2 +- mcpgateway/services/prompt_service.py | 25 +++--- mcpgateway/services/resource_service.py | 25 +++--- mcpgateway/services/server_service.py | 25 +++--- mcpgateway/services/tool_service.py | 35 ++++---- mcpgateway/utils/metrics_common.py | 65 +++++++++++++++ .../cache/test_session_registry_extended.py | 82 +++++++++---------- 9 files changed, 156 insertions(+), 107 deletions(-) create mode 100644 mcpgateway/utils/metrics_common.py diff --git a/.pylintrc b/.pylintrc index e4b355bb..ca1827b6 100644 --- a/.pylintrc +++ b/.pylintrc @@ -575,7 +575,7 @@ contextmanager-decorators=contextlib.contextmanager # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted. -generated-members= +generated-members=sqlalchemy.func.* # Tells whether to warn about missing members when the owner of the attribute # is inferred to be None. 
diff --git a/mcpgateway/schemas.py b/mcpgateway/schemas.py index fd4fe45e..9f0a68c1 100644 --- a/mcpgateway/schemas.py +++ b/mcpgateway/schemas.py @@ -590,7 +590,7 @@ def prevent_manual_mcp_creation(cls, values: Dict[str, Any]) -> Dict[str, Any]: """ integration_type = values.get("integration_type") if integration_type == "MCP": - raise ValueError("Cannot manually create MCP tools. Add MCP servers via the Gateways interface - " "tools will be auto-discovered and registered with integration_type='MCP'.") + raise ValueError("Cannot manually create MCP tools. Add MCP servers via the Gateways interface - tools will be auto-discovered and registered with integration_type='MCP'.") return values diff --git a/mcpgateway/services/logging_service.py b/mcpgateway/services/logging_service.py index 33b7d97a..4888ac93 100644 --- a/mcpgateway/services/logging_service.py +++ b/mcpgateway/services/logging_service.py @@ -123,7 +123,7 @@ async def initialize(self) -> None: try: root_logger.addHandler(_get_file_handler()) if settings.log_rotation_enabled: - logging.info(f"File logging enabled with rotation: {settings.log_folder or '.'}/{settings.log_file} " f"(max: {settings.log_max_size_mb}MB, backups: {settings.log_backup_count})") + logging.info(f"File logging enabled with rotation: {settings.log_folder or '.'}/{settings.log_file} (max: {settings.log_max_size_mb}MB, backups: {settings.log_backup_count})") else: logging.info(f"File logging enabled (no rotation): {settings.log_folder or '.'}/{settings.log_file}") except Exception as e: diff --git a/mcpgateway/services/prompt_service.py b/mcpgateway/services/prompt_service.py index ab2eba64..2ce7a46b 100644 --- a/mcpgateway/services/prompt_service.py +++ b/mcpgateway/services/prompt_service.py @@ -23,7 +23,7 @@ # Third-Party from jinja2 import Environment, meta, select_autoescape -from sqlalchemy import case, delete, desc, func, not_, select +from sqlalchemy import case, delete, desc, func, not_, select, Float from sqlalchemy.exc 
import IntegrityError from sqlalchemy.orm import Session @@ -35,6 +35,7 @@ from mcpgateway.plugins import GlobalContext, PluginManager, PluginViolationError, PromptPosthookPayload, PromptPrehookPayload from mcpgateway.schemas import PromptCreate, PromptRead, PromptUpdate, TopPerformer from mcpgateway.services.logging_service import LoggingService +from mcpgateway.utils.metrics_common import build_top_performers # Initialize logging service first logging_service = LoggingService() @@ -165,9 +166,13 @@ async def get_top_prompts(self, db: Session, limit: int = 5) -> List[TopPerforme DbPrompt.name, func.count(PromptMetric.id).label("execution_count"), # pylint: disable=not-callable func.avg(PromptMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable - case((func.count(PromptMetric.id) > 0, func.sum(case((PromptMetric.is_success, 1), else_=0)) / func.count(PromptMetric.id) * 100), else_=None).label( - "success_rate" - ), # pylint: disable=not-callable + case( + ( + func.count(PromptMetric.id) > 0, # pylint: disable=not-callable + func.sum(case((PromptMetric.is_success.is_(True), 1), else_=0)).cast(Float) / func.count(PromptMetric.id) * 100, # pylint: disable=not-callable + ), + else_=None, + ).label("success_rate"), func.max(PromptMetric.timestamp).label("last_execution"), # pylint: disable=not-callable ) .outerjoin(PromptMetric) @@ -177,17 +182,7 @@ async def get_top_prompts(self, db: Session, limit: int = 5) -> List[TopPerforme .all() ) - return [ - TopPerformer( - id=result.id, - name=result.name, - execution_count=result.execution_count or 0, - avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, - success_rate=float(result.success_rate) if result.success_rate else None, - last_execution=result.last_execution, - ) - for result in results - ] + return build_top_performers(results) def _convert_db_prompt(self, db_prompt: DbPrompt) -> Dict[str, Any]: """ diff --git a/mcpgateway/services/resource_service.py 
b/mcpgateway/services/resource_service.py index 8ed317e0..d3e6421b 100644 --- a/mcpgateway/services/resource_service.py +++ b/mcpgateway/services/resource_service.py @@ -33,7 +33,7 @@ # Third-Party import parse -from sqlalchemy import case, delete, desc, func, not_, select +from sqlalchemy import case, delete, desc, func, not_, select, Float from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -45,6 +45,7 @@ from mcpgateway.models import ResourceContent, ResourceTemplate, TextContent from mcpgateway.schemas import ResourceCreate, ResourceMetrics, ResourceRead, ResourceSubscription, ResourceUpdate, TopPerformer from mcpgateway.services.logging_service import LoggingService +from mcpgateway.utils.metrics_common import build_top_performers # Initialize logging service first logging_service = LoggingService() @@ -138,9 +139,13 @@ async def get_top_resources(self, db: Session, limit: int = 5) -> List[TopPerfor DbResource.uri.label("name"), # Using URI as the name field for TopPerformer func.count(ResourceMetric.id).label("execution_count"), # pylint: disable=not-callable func.avg(ResourceMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable - case((func.count(ResourceMetric.id) > 0, func.sum(case((ResourceMetric.is_success, 1), else_=0)) / func.count(ResourceMetric.id) * 100), else_=None).label( - "success_rate" - ), # pylint: disable=not-callable + case( + ( + func.count(ResourceMetric.id) > 0, # pylint: disable=not-callable + func.sum(case((ResourceMetric.is_success.is_(True), 1), else_=0)).cast(Float) / func.count(ResourceMetric.id) * 100, # pylint: disable=not-callable + ), + else_=None, + ).label("success_rate"), func.max(ResourceMetric.timestamp).label("last_execution"), # pylint: disable=not-callable ) .outerjoin(ResourceMetric) @@ -150,17 +155,7 @@ async def get_top_resources(self, db: Session, limit: int = 5) -> List[TopPerfor .all() ) - return [ - TopPerformer( - id=result.id, - name=result.name, - 
execution_count=result.execution_count or 0, - avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, - success_rate=float(result.success_rate) if result.success_rate else None, - last_execution=result.last_execution, - ) - for result in results - ] + return build_top_performers(results) def _convert_resource_to_read(self, resource: DbResource) -> ResourceRead: """ diff --git a/mcpgateway/services/server_service.py b/mcpgateway/services/server_service.py index 308f8f82..743e73b7 100644 --- a/mcpgateway/services/server_service.py +++ b/mcpgateway/services/server_service.py @@ -19,7 +19,7 @@ # Third-Party import httpx -from sqlalchemy import case, delete, desc, func, not_, select +from sqlalchemy import case, delete, desc, func, not_, select, Float from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -32,6 +32,7 @@ from mcpgateway.db import Tool as DbTool from mcpgateway.schemas import ServerCreate, ServerMetrics, ServerRead, ServerUpdate, TopPerformer from mcpgateway.services.logging_service import LoggingService +from mcpgateway.utils.metrics_common import build_top_performers # Initialize logging service first logging_service = LoggingService() @@ -153,9 +154,13 @@ async def get_top_servers(self, db: Session, limit: int = 5) -> List[TopPerforme DbServer.name, func.count(ServerMetric.id).label("execution_count"), # pylint: disable=not-callable func.avg(ServerMetric.response_time).label("avg_response_time"), # pylint: disable=not-callable - case((func.count(ServerMetric.id) > 0, func.sum(case((ServerMetric.is_success, 1), else_=0)) / func.count(ServerMetric.id) * 100), else_=None).label( - "success_rate" - ), # pylint: disable=not-callable + case( + ( + func.count(ServerMetric.id) > 0, # pylint: disable=not-callable + func.sum(case((ServerMetric.is_success.is_(True), 1), else_=0)).cast(Float) / func.count(ServerMetric.id) * 100, # pylint: disable=not-callable + ), + else_=None, + ).label("success_rate"), 
func.max(ServerMetric.timestamp).label("last_execution"), # pylint: disable=not-callable ) .outerjoin(ServerMetric) @@ -165,17 +170,7 @@ async def get_top_servers(self, db: Session, limit: int = 5) -> List[TopPerforme .all() ) - return [ - TopPerformer( - id=result.id, - name=result.name, - execution_count=result.execution_count or 0, - avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, - success_rate=float(result.success_rate) if result.success_rate else None, - last_execution=result.last_execution, - ) - for result in results - ] + return build_top_performers(results) def _convert_server_to_read(self, server: DbServer) -> ServerRead: """ diff --git a/mcpgateway/services/tool_service.py b/mcpgateway/services/tool_service.py index bc4980f2..13220efc 100644 --- a/mcpgateway/services/tool_service.py +++ b/mcpgateway/services/tool_service.py @@ -28,7 +28,7 @@ from mcp import ClientSession from mcp.client.sse import sse_client from mcp.client.streamable_http import streamablehttp_client -from sqlalchemy import case, delete, desc, func, not_, select +from sqlalchemy import case, delete, desc, func, not_, select, Float from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -44,6 +44,7 @@ from mcpgateway.schemas import ToolCreate, ToolRead, ToolUpdate, TopPerformer from mcpgateway.services.logging_service import LoggingService from mcpgateway.utils.create_slug import slugify +from mcpgateway.utils.metrics_common import build_top_performers from mcpgateway.utils.passthrough_headers import get_passthrough_headers from mcpgateway.utils.retry_manager import ResilientHttpClient from mcpgateway.utils.services_auth import decode_auth @@ -215,9 +216,13 @@ async def get_top_tools(self, db: Session, limit: int = 5) -> List[TopPerformer] DbTool.name, func.count(ToolMetric.id).label("execution_count"), # pylint: disable=not-callable func.avg(ToolMetric.response_time).label("avg_response_time"), # pylint: 
disable=not-callable - case((func.count(ToolMetric.id) > 0, func.sum(case((ToolMetric.is_success, 1), else_=0)) / func.count(ToolMetric.id) * 100), else_=None).label( - "success_rate" - ), # pylint: disable=not-callable + case( + ( + func.count(ToolMetric.id) > 0, # pylint: disable=not-callable + func.sum(case((ToolMetric.is_success.is_(True), 1), else_=0)).cast(Float) / func.count(ToolMetric.id) * 100, # pylint: disable=not-callable + ), + else_=None, + ).label("success_rate"), func.max(ToolMetric.timestamp).label("last_execution"), # pylint: disable=not-callable ) .outerjoin(ToolMetric) @@ -227,17 +232,7 @@ async def get_top_tools(self, db: Session, limit: int = 5) -> List[TopPerformer] .all() ) - return [ - TopPerformer( - id=result.id, - name=result.name, - execution_count=result.execution_count or 0, - avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, - success_rate=float(result.success_rate) if result.success_rate else None, - last_execution=result.last_execution, - ) - for result in results - ] + return build_top_performers(results) def _convert_tool_to_read(self, tool: DbTool) -> ToolRead: """Converts a DbTool instance into a ToolRead model, including aggregated metrics and @@ -734,18 +729,21 @@ async def invoke_tool(self, db: Session, name: str, arguments: Dict[str, Any], r # Handle 204 No Content responses that have no body if response.status_code == 204: tool_result = ToolResult(content=[TextContent(type="text", text="Request completed successfully (No Content)")]) + # Mark as successful only after all operations complete successfully + success = True elif response.status_code not in [200, 201, 202, 206]: result = response.json() tool_result = ToolResult( content=[TextContent(type="text", text=str(result["error"]) if "error" in result else "Tool error encountered")], is_error=True, ) + # Don't mark as successful for error responses - success remains False else: result = response.json() filtered_response = 
extract_using_jq(result, tool.jsonpath_filter) tool_result = ToolResult(content=[TextContent(type="text", text=json.dumps(filtered_response, indent=2))]) - - success = True + # Mark as successful only after all operations complete successfully + success = True elif tool.integration_type == "MCP": transport = tool.request_type.lower() gateway = db.execute(select(DbGateway).where(DbGateway.id == tool.gateway_id).where(DbGateway.enabled)).scalar_one_or_none() @@ -795,9 +793,10 @@ async def connect_to_streamablehttp_server(server_url: str): tool_call_result = await connect_to_streamablehttp_server(tool_gateway.url) content = tool_call_result.model_dump(by_alias=True).get("content", []) - success = True filtered_response = extract_using_jq(content, tool.jsonpath_filter) tool_result = ToolResult(content=filtered_response) + # Mark as successful only after all operations complete successfully + success = True else: tool_result = ToolResult(content=[TextContent(type="text", text="Invalid tool type")]) diff --git a/mcpgateway/utils/metrics_common.py b/mcpgateway/utils/metrics_common.py new file mode 100644 index 00000000..b10cfff6 --- /dev/null +++ b/mcpgateway/utils/metrics_common.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +""" +Common utilities for metrics handling across service modules. + +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti +""" + +# Standard +from typing import List + +# First-Party +from mcpgateway.schemas import TopPerformer + + +def build_top_performers(results: List) -> List[TopPerformer]: + """ + Convert database query results to TopPerformer objects. + + This utility function eliminates code duplication across service modules + that need to convert database query results with metrics into TopPerformer objects. 
+ + Args: + results: List of database query results, each containing: + - id: Entity ID + - name: Entity name + - execution_count: Total executions + - avg_response_time: Average response time + - success_rate: Success rate percentage + - last_execution: Last execution timestamp + + Returns: + List[TopPerformer]: List of TopPerformer objects with proper type conversions + + Examples: + >>> from unittest.mock import MagicMock + >>> result = MagicMock() + >>> result.id = 1 + >>> result.name = "test" + >>> result.execution_count = 10 + >>> result.avg_response_time = 1.5 + >>> result.success_rate = 85.0 + >>> result.last_execution = None + >>> performers = build_top_performers([result]) + >>> len(performers) + 1 + >>> performers[0].id + 1 + >>> performers[0].execution_count + 10 + >>> performers[0].avg_response_time + 1.5 + """ + return [ + TopPerformer( + id=result.id, + name=result.name, + execution_count=result.execution_count or 0, + avg_response_time=float(result.avg_response_time) if result.avg_response_time else None, + success_rate=float(result.success_rate) if result.success_rate else None, + last_execution=result.last_execution, + ) + for result in results + ] diff --git a/tests/unit/mcpgateway/cache/test_session_registry_extended.py b/tests/unit/mcpgateway/cache/test_session_registry_extended.py index 6a3195ad..a528e42e 100644 --- a/tests/unit/mcpgateway/cache/test_session_registry_extended.py +++ b/tests/unit/mcpgateway/cache/test_session_registry_extended.py @@ -31,7 +31,7 @@ def test_redis_import_error_flag(self): import importlib import mcpgateway.cache.session_registry importlib.reload(mcpgateway.cache.session_registry) - + # Should set REDIS_AVAILABLE = False assert not mcpgateway.cache.session_registry.REDIS_AVAILABLE @@ -41,7 +41,7 @@ def test_sqlalchemy_import_error_flag(self): import importlib import mcpgateway.cache.session_registry importlib.reload(mcpgateway.cache.session_registry) - + # Should set SQLALCHEMY_AVAILABLE = False assert not 
mcpgateway.cache.session_registry.SQLALCHEMY_AVAILABLE @@ -53,7 +53,7 @@ class TestNoneBackend: async def test_none_backend_initialization_logging(self, caplog): """Test that 'none' backend logs initialization message.""" registry = SessionRegistry(backend="none") - + # Check that initialization message is logged assert "Session registry initialized with 'none' backend - session tracking disabled" in caplog.text @@ -61,10 +61,10 @@ async def test_none_backend_initialization_logging(self, caplog): async def test_none_backend_initialize_method(self): """Test 'none' backend initialize method does nothing.""" registry = SessionRegistry(backend="none") - + # Should not raise any errors await registry.initialize() - + # No cleanup task should be created assert registry._cleanup_task is None @@ -78,22 +78,22 @@ async def test_redis_add_session_error(self, monkeypatch, caplog): mock_redis = AsyncMock() mock_redis.setex = AsyncMock(side_effect=Exception("Redis connection error")) mock_redis.publish = AsyncMock() - + with patch('mcpgateway.cache.session_registry.REDIS_AVAILABLE', True): with patch('mcpgateway.cache.session_registry.Redis') as MockRedis: MockRedis.from_url.return_value = mock_redis - + registry = SessionRegistry(backend="redis", redis_url="redis://localhost") - + class DummyTransport: async def disconnect(self): pass async def is_connected(self): return True - + transport = DummyTransport() await registry.add_session("test_session", transport) - + # Should log the Redis error assert "Redis error adding session test_session: Redis connection error" in caplog.text @@ -102,15 +102,15 @@ async def test_redis_broadcast_error(self, monkeypatch, caplog): """Test Redis error during broadcast.""" mock_redis = AsyncMock() mock_redis.publish = AsyncMock(side_effect=Exception("Redis publish error")) - + with patch('mcpgateway.cache.session_registry.REDIS_AVAILABLE', True): with patch('mcpgateway.cache.session_registry.Redis') as MockRedis: MockRedis.from_url.return_value 
= mock_redis - + registry = SessionRegistry(backend="redis", redis_url="redis://localhost") - + await registry.broadcast("test_session", {"test": "message"}) - + # Should log the Redis error assert "Redis error during broadcast: Redis publish error" in caplog.text @@ -118,7 +118,7 @@ async def test_redis_broadcast_error(self, monkeypatch, caplog): class TestDatabaseBackendErrors: """Test database backend error scenarios.""" - @pytest.mark.asyncio + @pytest.mark.asyncio async def test_database_add_session_error(self, monkeypatch, caplog): """Test database error during add_session.""" def mock_get_db(): @@ -127,24 +127,24 @@ def mock_get_db(): mock_session.rollback = Mock() mock_session.close = Mock() yield mock_session - + with patch('mcpgateway.cache.session_registry.SQLALCHEMY_AVAILABLE', True): with patch('mcpgateway.cache.session_registry.get_db', mock_get_db): with patch('asyncio.to_thread') as mock_to_thread: # Simulate the database error being raised from the thread mock_to_thread.side_effect = Exception("Database connection error") - + registry = SessionRegistry(backend="database", database_url="sqlite:///test.db") - + class DummyTransport: async def disconnect(self): pass async def is_connected(self): return True - + transport = DummyTransport() await registry.add_session("test_session", transport) - + # Should log the database error assert "Database error adding session test_session: Database connection error" in caplog.text @@ -157,17 +157,17 @@ def mock_get_db(): mock_session.rollback = Mock() mock_session.close = Mock() yield mock_session - + with patch('mcpgateway.cache.session_registry.SQLALCHEMY_AVAILABLE', True): with patch('mcpgateway.cache.session_registry.get_db', mock_get_db): with patch('asyncio.to_thread') as mock_to_thread: # Simulate the database error being raised from the thread mock_to_thread.side_effect = Exception("Database broadcast error") - + registry = SessionRegistry(backend="database", database_url="sqlite:///test.db") - + await 
registry.broadcast("test_session", {"test": "message"}) - + # Should log the database error assert "Database error during broadcast: Database broadcast error" in caplog.text @@ -180,16 +180,16 @@ async def test_memory_backend_initialization_logging(self, caplog): """Test memory backend initialization creates cleanup task.""" registry = SessionRegistry(backend="memory") await registry.initialize() - + try: # Should log initialization assert "Initializing session registry with backend: memory" in caplog.text assert "Memory cleanup task started" in caplog.text - + # Should have created cleanup task assert registry._cleanup_task is not None assert not registry._cleanup_task.done() - + finally: await registry.shutdown() @@ -199,16 +199,16 @@ async def test_database_backend_initialization_logging(self, caplog): with patch('mcpgateway.cache.session_registry.SQLALCHEMY_AVAILABLE', True): registry = SessionRegistry(backend="database", database_url="sqlite:///test.db") await registry.initialize() - + try: - # Should log initialization + # Should log initialization assert "Initializing session registry with backend: database" in caplog.text assert "Database cleanup task started" in caplog.text - + # Should have created cleanup task assert registry._cleanup_task is not None assert not registry._cleanup_task.done() - + finally: await registry.shutdown() @@ -218,18 +218,18 @@ async def test_redis_initialization_subscribe(self, monkeypatch): mock_redis = AsyncMock() mock_pubsub = AsyncMock() mock_redis.pubsub = Mock(return_value=mock_pubsub) # Use Mock for sync method - + with patch('mcpgateway.cache.session_registry.REDIS_AVAILABLE', True): with patch('mcpgateway.cache.session_registry.Redis') as MockRedis: MockRedis.from_url.return_value = mock_redis - + registry = SessionRegistry(backend="redis", redis_url="redis://localhost") await registry.initialize() - + try: # Should have subscribed to events channel mock_pubsub.subscribe.assert_called_once_with("mcp_session_events") - + 
finally: await registry.shutdown() @@ -238,12 +238,12 @@ async def test_shutdown_cancels_cleanup_task(self): """Test shutdown properly cancels cleanup tasks.""" registry = SessionRegistry(backend="memory") await registry.initialize() - + original_task = registry._cleanup_task assert not original_task.cancelled() - + await registry.shutdown() - + # Task should be cancelled assert original_task.cancelled() @@ -252,9 +252,9 @@ async def test_shutdown_handles_already_cancelled_task(self): """Test shutdown handles already cancelled cleanup task.""" registry = SessionRegistry(backend="memory") await registry.initialize() - + # Cancel task before shutdown registry._cleanup_task.cancel() - + # Shutdown should not raise error - await registry.shutdown() \ No newline at end of file + await registry.shutdown() From 810e7d396b608b1fdfaa363be8aa246c343e6e49 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Sun, 10 Aug 2025 09:28:03 +0100 Subject: [PATCH 06/11] Fix lint issues, rebased Signed-off-by: Mihai Criveti --- mcpgateway/services/prompt_service.py | 8 ++++---- mcpgateway/services/resource_service.py | 8 ++++---- mcpgateway/services/server_service.py | 8 ++++---- mcpgateway/services/tool_service.py | 8 ++++---- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/mcpgateway/services/prompt_service.py b/mcpgateway/services/prompt_service.py index 2ce7a46b..fb826827 100644 --- a/mcpgateway/services/prompt_service.py +++ b/mcpgateway/services/prompt_service.py @@ -23,7 +23,7 @@ # Third-Party from jinja2 import Environment, meta, select_autoescape -from sqlalchemy import case, delete, desc, func, not_, select, Float +from sqlalchemy import case, delete, desc, Float, func, not_, select from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -169,7 +169,7 @@ async def get_top_prompts(self, db: Session, limit: int = 5) -> List[TopPerforme case( ( func.count(PromptMetric.id) > 0, # pylint: disable=not-callable - 
func.sum(case((PromptMetric.is_success.is_(True), 1), else_=0)).cast(Float) / func.count(PromptMetric.id) * 100, # pylint: disable=not-callable + func.sum(case((PromptMetric.is_success == 1, 1), else_=0)).cast(Float) / func.count(PromptMetric.id) * 100, # pylint: disable=not-callable ), else_=None, ).label("success_rate"), @@ -1050,8 +1050,8 @@ async def aggregate_metrics(self, db: Session) -> Dict[str, Any]: """ total = db.execute(select(func.count(PromptMetric.id))).scalar() or 0 # pylint: disable=not-callable - successful = db.execute(select(func.count(PromptMetric.id)).where(PromptMetric.is_success)).scalar() or 0 # pylint: disable=not-callable - failed = db.execute(select(func.count(PromptMetric.id)).where(not_(PromptMetric.is_success))).scalar() or 0 # pylint: disable=not-callable + successful = db.execute(select(func.count(PromptMetric.id)).where(PromptMetric.is_success == 1)).scalar() or 0 # pylint: disable=not-callable + failed = db.execute(select(func.count(PromptMetric.id)).where(PromptMetric.is_success == 0)).scalar() or 0 # pylint: disable=not-callable failure_rate = failed / total if total > 0 else 0.0 min_rt = db.execute(select(func.min(PromptMetric.response_time))).scalar() max_rt = db.execute(select(func.max(PromptMetric.response_time))).scalar() diff --git a/mcpgateway/services/resource_service.py b/mcpgateway/services/resource_service.py index d3e6421b..24412fbe 100644 --- a/mcpgateway/services/resource_service.py +++ b/mcpgateway/services/resource_service.py @@ -33,7 +33,7 @@ # Third-Party import parse -from sqlalchemy import case, delete, desc, func, not_, select, Float +from sqlalchemy import case, delete, desc, Float, func, not_, select from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -142,7 +142,7 @@ async def get_top_resources(self, db: Session, limit: int = 5) -> List[TopPerfor case( ( func.count(ResourceMetric.id) > 0, # pylint: disable=not-callable - func.sum(case((ResourceMetric.is_success.is_(True), 1), 
else_=0)).cast(Float) / func.count(ResourceMetric.id) * 100, # pylint: disable=not-callable + func.sum(case((ResourceMetric.is_success == 1, 1), else_=0)).cast(Float) / func.count(ResourceMetric.id) * 100, # pylint: disable=not-callable ), else_=None, ).label("success_rate"), @@ -1040,9 +1040,9 @@ async def aggregate_metrics(self, db: Session) -> ResourceMetrics: """ total_executions = db.execute(select(func.count()).select_from(ResourceMetric)).scalar() or 0 # pylint: disable=not-callable - successful_executions = db.execute(select(func.count()).select_from(ResourceMetric).where(ResourceMetric.is_success)).scalar() or 0 # pylint: disable=not-callable + successful_executions = db.execute(select(func.count()).select_from(ResourceMetric).where(ResourceMetric.is_success == 1)).scalar() or 0 # pylint: disable=not-callable - failed_executions = db.execute(select(func.count()).select_from(ResourceMetric).where(not_(ResourceMetric.is_success))).scalar() or 0 # pylint: disable=not-callable + failed_executions = db.execute(select(func.count()).select_from(ResourceMetric).where(ResourceMetric.is_success == 0)).scalar() or 0 # pylint: disable=not-callable min_response_time = db.execute(select(func.min(ResourceMetric.response_time))).scalar() diff --git a/mcpgateway/services/server_service.py b/mcpgateway/services/server_service.py index 743e73b7..46a1db0d 100644 --- a/mcpgateway/services/server_service.py +++ b/mcpgateway/services/server_service.py @@ -19,7 +19,7 @@ # Third-Party import httpx -from sqlalchemy import case, delete, desc, func, not_, select, Float +from sqlalchemy import case, delete, desc, Float, func, select from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -157,7 +157,7 @@ async def get_top_servers(self, db: Session, limit: int = 5) -> List[TopPerforme case( ( func.count(ServerMetric.id) > 0, # pylint: disable=not-callable - func.sum(case((ServerMetric.is_success.is_(True), 1), else_=0)).cast(Float) / func.count(ServerMetric.id) 
* 100, # pylint: disable=not-callable + func.sum(case((ServerMetric.is_success == 1, 1), else_=0)).cast(Float) / func.count(ServerMetric.id) * 100, # pylint: disable=not-callable ), else_=None, ).label("success_rate"), @@ -833,9 +833,9 @@ async def aggregate_metrics(self, db: Session) -> ServerMetrics: """ total_executions = db.execute(select(func.count()).select_from(ServerMetric)).scalar() or 0 # pylint: disable=not-callable - successful_executions = db.execute(select(func.count()).select_from(ServerMetric).where(ServerMetric.is_success)).scalar() or 0 # pylint: disable=not-callable + successful_executions = db.execute(select(func.count()).select_from(ServerMetric).where(ServerMetric.is_success == 1)).scalar() or 0 # pylint: disable=not-callable - failed_executions = db.execute(select(func.count()).select_from(ServerMetric).where(not_(ServerMetric.is_success))).scalar() or 0 # pylint: disable=not-callable + failed_executions = db.execute(select(func.count()).select_from(ServerMetric).where(ServerMetric.is_success == 0)).scalar() or 0 # pylint: disable=not-callable min_response_time = db.execute(select(func.min(ServerMetric.response_time))).scalar() diff --git a/mcpgateway/services/tool_service.py b/mcpgateway/services/tool_service.py index 13220efc..404aa6ba 100644 --- a/mcpgateway/services/tool_service.py +++ b/mcpgateway/services/tool_service.py @@ -28,7 +28,7 @@ from mcp import ClientSession from mcp.client.sse import sse_client from mcp.client.streamable_http import streamablehttp_client -from sqlalchemy import case, delete, desc, func, not_, select, Float +from sqlalchemy import case, delete, desc, Float, func, not_, select from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -219,7 +219,7 @@ async def get_top_tools(self, db: Session, limit: int = 5) -> List[TopPerformer] case( ( func.count(ToolMetric.id) > 0, # pylint: disable=not-callable - func.sum(case((ToolMetric.is_success.is_(True), 1), else_=0)).cast(Float) / 
func.count(ToolMetric.id) * 100, # pylint: disable=not-callable + func.sum(case((ToolMetric.is_success == 1, 1), else_=0)).cast(Float) / func.count(ToolMetric.id) * 100, # pylint: disable=not-callable ), else_=None, ).label("success_rate"), @@ -1111,8 +1111,8 @@ async def aggregate_metrics(self, db: Session) -> Dict[str, Any]: """ total = db.execute(select(func.count(ToolMetric.id))).scalar() or 0 # pylint: disable=not-callable - successful = db.execute(select(func.count(ToolMetric.id)).where(ToolMetric.is_success)).scalar() or 0 # pylint: disable=not-callable - failed = db.execute(select(func.count(ToolMetric.id)).where(not_(ToolMetric.is_success))).scalar() or 0 # pylint: disable=not-callable + successful = db.execute(select(func.count(ToolMetric.id)).where(ToolMetric.is_success == 1)).scalar() or 0 # pylint: disable=not-callable + failed = db.execute(select(func.count(ToolMetric.id)).where(ToolMetric.is_success == 0)).scalar() or 0 # pylint: disable=not-callable failure_rate = failed / total if total > 0 else 0.0 min_rt = db.execute(select(func.min(ToolMetric.response_time))).scalar() max_rt = db.execute(select(func.max(ToolMetric.response_time))).scalar() From a08ab73bbaa5081091a671132776618163a5318a Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Sun, 10 Aug 2025 11:36:52 +0100 Subject: [PATCH 07/11] Fixing the success rate Signed-off-by: Mihai Criveti --- mcpgateway/static/admin.js | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/mcpgateway/static/admin.js b/mcpgateway/static/admin.js index 4e61b919..dd6568ad 100644 --- a/mcpgateway/static/admin.js +++ b/mcpgateway/static/admin.js @@ -1330,7 +1330,12 @@ function createEnhancedTopPerformersSection(topData) { } } function calculateSuccessRate(item) { - const total = item.execution_count || item.executions || 0; + // API returns successRate directly as a percentage + if (item.successRate !== undefined && item.successRate !== null) { + return Math.round(item.successRate); + } + // 
Fallback for legacy format (if needed) + const total = item.execution_count || item.executions || item.executionCount || 0; const successful = item.successful_count || item.successfulExecutions || 0; return total > 0 ? Math.round((successful / total) * 100) : 0; } @@ -1466,7 +1471,7 @@ function createTopPerformersTable(entityType, data, isActive) { execCell.className = "px-6 py-4 whitespace-nowrap text-sm text-gray-500 dark:text-gray-300 sm:px-6 sm:py-4"; execCell.textContent = formatNumber( - item.execution_count || item.executions || 0, + item.executionCount || item.execution_count || item.executions || 0, ); row.appendChild(execCell); @@ -1627,7 +1632,7 @@ function exportMetricsToCSV(topData) { type, index + 1, `"${escapeHtml(item.name || "Unknown")}"`, - formatNumber(item.execution_count || item.executions || 0), + formatNumber(item.executionCount || item.execution_count || item.executions || 0), item.avg_response_time || item.avgResponseTime ? `${Math.round(item.avg_response_time || item.avgResponseTime)}ms` : "N/A", From 1fae3594ca97a8f7b5cb1f34bfce092260e0a6fd Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Sun, 10 Aug 2025 11:59:45 +0100 Subject: [PATCH 08/11] Fixing the success rate Signed-off-by: Mihai Criveti --- mcpgateway/main.py | 74 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 67 insertions(+), 7 deletions(-) diff --git a/mcpgateway/main.py b/mcpgateway/main.py index af6ddab7..0c1528b1 100644 --- a/mcpgateway/main.py +++ b/mcpgateway/main.py @@ -29,6 +29,7 @@ import asyncio from contextlib import asynccontextmanager import json +import time from typing import Any, AsyncIterator, Dict, List, Optional, Union from urllib.parse import urlparse, urlunparse @@ -52,7 +53,7 @@ from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates from pydantic import ValidationError -from sqlalchemy import text +from sqlalchemy import select, text from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import 
Session from starlette.middleware.base import BaseHTTPMiddleware @@ -64,7 +65,8 @@ from mcpgateway.bootstrap_db import main as bootstrap_db from mcpgateway.cache import ResourceCache, SessionRegistry from mcpgateway.config import jsonpath_modifier, settings -from mcpgateway.db import refresh_slugs_on_startup, SessionLocal +from mcpgateway.db import Prompt as DbPrompt +from mcpgateway.db import PromptMetric, refresh_slugs_on_startup, SessionLocal from mcpgateway.handlers.sampling import SamplingHandler from mcpgateway.models import ( InitializeRequest, @@ -1782,15 +1784,44 @@ async def get_prompt( Rendered prompt or metadata. """ logger.debug(f"User: {user} requested prompt: {name} with args={args}") + start_time = time.monotonic() + success = False + error_message = None + result = None + try: PromptExecuteArgs(args=args) - return await prompt_service.get_prompt(db, name, args) + result = await prompt_service.get_prompt(db, name, args) + success = True + logger.debug(f"Prompt execution successful for '{name}'") except Exception as ex: + error_message = str(ex) logger.error(f"Could not retrieve prompt {name}: {ex}") if isinstance(ex, (ValueError, PromptError)): - return JSONResponse(content={"message": "Prompt execution arguments contains HTML tags that may cause security issues"}, status_code=422) - if isinstance(ex, PluginViolationError): - return JSONResponse(content={"message": "Prompt execution arguments contains HTML tags that may cause security issues", "details": ex.message}, status_code=422) + result = JSONResponse(content={"message": "Prompt execution arguments contains HTML tags that may cause security issues"}, status_code=422) + elif isinstance(ex, PluginViolationError): + result = JSONResponse(content={"message": "Prompt execution arguments contains HTML tags that may cause security issues", "details": ex.message}, status_code=422) + else: + raise + + # Record metrics (moved outside try/except/finally to ensure it runs) + end_time = time.monotonic() + 
response_time = end_time - start_time + + # Get the prompt from database to get its ID + prompt = db.execute(select(DbPrompt).where(DbPrompt.name == name)).scalar_one_or_none() + + if prompt: + metric = PromptMetric( + prompt_id=prompt.id, + response_time=response_time, + is_success=success, + error_message=error_message, + ) + db.add(metric) + db.commit() + + return result @prompt_router.get("/{name}") @@ -1812,7 +1843,36 @@ async def get_prompt_no_args( The prompt template information """ logger.debug(f"User: {user} requested prompt: {name} with no arguments") - return await prompt_service.get_prompt(db, name, {}) + start_time = time.monotonic() + success = False + error_message = None + result = None + + try: + result = await prompt_service.get_prompt(db, name, {}) + success = True + except Exception as ex: + error_message = str(ex) + raise + + # Record metrics + end_time = time.monotonic() + response_time = end_time - start_time + + # Get the prompt from database to get its ID + prompt = db.execute(select(DbPrompt).where(DbPrompt.name == name)).scalar_one_or_none() + + if prompt: + metric = PromptMetric( + prompt_id=prompt.id, + response_time=response_time, + is_success=success, + error_message=error_message, + ) + db.add(metric) + db.commit() + + return result @prompt_router.put("/{name}", response_model=PromptRead) From 9e7ae156d572384b5b5491dcd91c515a9c5f905d Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Sun, 10 Aug 2025 12:07:44 +0100 Subject: [PATCH 09/11] Fix metrics display and calculations for tools, resources, and prompts - Fixed SQLite boolean comparisons (using == 1 instead of .is_(True)) - Fixed Float type casting in SQLAlchemy queries - Added division by zero protection - Fixed frontend JavaScript to handle camelCase field names from API - Added prompt metrics recording (partial - needs more work for auto-recording) - Eliminated code duplication with shared metrics_common.py utility - Fixed all pylint and flake8 issues The metrics now 
correctly display success rates and execution counts for all entity types. Signed-off-by: Mihai Criveti --- mcpgateway/main.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mcpgateway/main.py b/mcpgateway/main.py index 0c1528b1..7e5ca752 100644 --- a/mcpgateway/main.py +++ b/mcpgateway/main.py @@ -1782,6 +1782,9 @@ async def get_prompt( Returns: Rendered prompt or metadata. + + Raises: + Exception: Re-raised if not a handled exception type. """ logger.debug(f"User: {user} requested prompt: {name} with args={args}") start_time = time.monotonic() @@ -1841,6 +1844,9 @@ async def get_prompt_no_args( Returns: The prompt template information + + Raises: + Exception: Re-raised from prompt service. """ logger.debug(f"User: {user} requested prompt: {name} with no arguments") start_time = time.monotonic() From e243a1157609c03c017ab799285a11325c8de2c3 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Sun, 10 Aug 2025 12:09:07 +0100 Subject: [PATCH 10/11] Fixing lint-web Signed-off-by: Mihai Criveti --- mcpgateway/static/admin.js | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/mcpgateway/static/admin.js b/mcpgateway/static/admin.js index dd6568ad..73bb501e 100644 --- a/mcpgateway/static/admin.js +++ b/mcpgateway/static/admin.js @@ -1335,7 +1335,8 @@ function calculateSuccessRate(item) { return Math.round(item.successRate); } // Fallback for legacy format (if needed) - const total = item.execution_count || item.executions || item.executionCount || 0; + const total = + item.execution_count || item.executions || item.executionCount || 0; const successful = item.successful_count || item.successfulExecutions || 0; return total > 0 ? 
Math.round((successful / total) * 100) : 0; } @@ -1632,7 +1633,12 @@ function exportMetricsToCSV(topData) { type, index + 1, `"${escapeHtml(item.name || "Unknown")}"`, - formatNumber(item.executionCount || item.execution_count || item.executions || 0), + formatNumber( + item.executionCount || + item.execution_count || + item.executions || + 0, + ), item.avg_response_time || item.avgResponseTime ? `${Math.round(item.avg_response_time || item.avgResponseTime)}ms` : "N/A", From d2b8bee876a979caf1b82518a1e99b6ae1b305f1 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Sun, 10 Aug 2025 14:19:03 +0100 Subject: [PATCH 11/11] Improve test coverage Signed-off-by: Mihai Criveti --- .../services/test_resource_service.py | 123 ++++++++++ .../mcpgateway/services/test_tool_service.py | 218 ++++++++++++++++++ 2 files changed, 341 insertions(+) diff --git a/tests/unit/mcpgateway/services/test_resource_service.py b/tests/unit/mcpgateway/services/test_resource_service.py index 1504d43c..b4da0575 100644 --- a/tests/unit/mcpgateway/services/test_resource_service.py +++ b/tests/unit/mcpgateway/services/test_resource_service.py @@ -1298,5 +1298,128 @@ async def test_update_resource_error(self, resource_service, mock_db, mock_resou mock_db.rollback.assert_called_once() +class TestResourceServiceMetricsExtended: + """Extended tests for resource service metrics.""" + + @pytest.mark.asyncio + async def test_list_resources_with_tags(self, resource_service, mock_db, mock_resource): + """Test listing resources with tag filtering.""" + from sqlalchemy import func + + # Mock query chain + mock_query = MagicMock() + mock_query.where.return_value = mock_query + mock_db.execute.return_value.scalars.return_value.all.return_value = [mock_resource] + + with patch("mcpgateway.services.resource_service.select", return_value=mock_query): + with patch("mcpgateway.services.resource_service.func") as mock_func: + mock_func.json_contains.return_value = MagicMock() + mock_func.or_.return_value = MagicMock() 
+ + result = await resource_service.list_resources( + mock_db, tags=["test", "production"] + ) + + # Verify tag filtering was applied + assert mock_func.json_contains.call_count == 2 + mock_func.or_.assert_called_once() + assert len(result) == 1 + + @pytest.mark.asyncio + async def test_subscribe_events_with_uri(self, resource_service): + """Test subscribing to events for specific URI.""" + test_uri = "test://resource" + test_event = {"type": "resource_updated", "data": {"uri": test_uri}} + + # Start subscription + subscriber = resource_service.subscribe_events(uri=test_uri) + subscription_task = asyncio.create_task(subscriber.__anext__()) + + # Allow subscription to register + await asyncio.sleep(0.01) + + # Publish event to specific URI + await resource_service._publish_event(test_uri, test_event) + + # Receive event + received = await asyncio.wait_for(subscription_task, timeout=0.1) + assert received == test_event + + # Clean up + await subscriber.aclose() + + # Verify cleanup + assert test_uri not in resource_service._event_subscribers + + @pytest.mark.asyncio + async def test_subscribe_events_global(self, resource_service): + """Test subscribing to all events.""" + test_event = {"type": "resource_created", "data": {"uri": "any://resource"}} + + # Start global subscription + subscriber = resource_service.subscribe_events(uri=None) + subscription_task = asyncio.create_task(subscriber.__anext__()) + + await asyncio.sleep(0.01) + + # Publish event to any URI + await resource_service._publish_event("any://resource", test_event) + + received = await asyncio.wait_for(subscription_task, timeout=0.1) + assert received == test_event + + await subscriber.aclose() + + # Verify cleanup of global subscribers + assert "*" not in resource_service._event_subscribers + + @pytest.mark.asyncio + async def test_read_template_resource_not_found(self, resource_service): + """Test reading template resource that doesn't exist.""" + with pytest.raises(ResourceNotFoundError, match="No 
template matches URI"): + await resource_service._read_template_resource("template://nonexistent/{id}") + + @pytest.mark.asyncio + async def test_get_top_resources(self, resource_service, mock_db): + """Test getting top performing resources.""" + # Mock query results + mock_result1 = MagicMock() + mock_result1.id = 1 + mock_result1.name = "resource1" + mock_result1.execution_count = 10 + mock_result1.avg_response_time = 1.5 + mock_result1.success_rate = 100.0 + mock_result1.last_execution = "2025-01-10T12:00:00" + + mock_result2 = MagicMock() + mock_result2.id = 2 + mock_result2.name = "resource2" + mock_result2.execution_count = 7 + mock_result2.avg_response_time = 2.3 + mock_result2.success_rate = 71.43 + mock_result2.last_execution = "2025-01-10T11:00:00" + + # Mock the query chain + mock_query = MagicMock() + mock_query.outerjoin.return_value = mock_query + mock_query.group_by.return_value = mock_query + mock_query.order_by.return_value = mock_query + mock_query.limit.return_value = mock_query + mock_query.all.return_value = [mock_result1, mock_result2] + + mock_db.query.return_value = mock_query + + result = await resource_service.get_top_resources(mock_db, limit=2) + + assert len(result) == 2 + assert result[0].name == "resource1" + assert result[0].execution_count == 10 + assert result[0].success_rate == 100.0 + + assert result[1].name == "resource2" + assert result[1].execution_count == 7 + assert result[1].success_rate == pytest.approx(71.43, rel=0.01) + + if __name__ == "__main__": pytest.main([__file__, "-v"]) diff --git a/tests/unit/mcpgateway/services/test_tool_service.py b/tests/unit/mcpgateway/services/test_tool_service.py index 1e941eb2..db8158ce 100644 --- a/tests/unit/mcpgateway/services/test_tool_service.py +++ b/tests/unit/mcpgateway/services/test_tool_service.py @@ -29,6 +29,7 @@ ToolNotFoundError, ToolResult, ToolService, + ToolValidationError, ) from mcpgateway.utils.services_auth import encode_auth @@ -1650,3 +1651,220 @@ async def 
test_reset_metrics(self, tool_service, test_db): # Verify DB operations with tool_id test_db.execute.assert_called_once() test_db.commit.assert_called_once() + + async def test_record_tool_metric(self, tool_service, mock_tool): + """Test recording tool invocation metrics.""" + # Set up test data + start_time = 100.0 + success = True + error_message = None + + # Mock database + mock_db = MagicMock() + + # Mock time.monotonic to return a consistent value + with patch("mcpgateway.services.tool_service.time.monotonic", return_value=105.0): + # Mock ToolMetric class + with patch("mcpgateway.services.tool_service.ToolMetric") as MockToolMetric: + mock_metric_instance = MagicMock() + MockToolMetric.return_value = mock_metric_instance + + # Call the method + await tool_service._record_tool_metric(mock_db, mock_tool, start_time, success, error_message) + + # Verify ToolMetric was created with correct data + MockToolMetric.assert_called_once_with( + tool_id=mock_tool.id, + response_time=5.0, # 105.0 - 100.0 + is_success=True, + error_message=None + ) + + # Verify DB operations + mock_db.add.assert_called_once_with(mock_metric_instance) + mock_db.commit.assert_called_once() + + async def test_record_tool_metric_with_error(self, tool_service, mock_tool): + """Test recording tool invocation metrics with error.""" + start_time = 100.0 + success = False + error_message = "Connection timeout" + + # Mock database + mock_db = MagicMock() + + with patch("mcpgateway.services.tool_service.time.monotonic", return_value=102.5): + with patch("mcpgateway.services.tool_service.ToolMetric") as MockToolMetric: + mock_metric_instance = MagicMock() + MockToolMetric.return_value = mock_metric_instance + + await tool_service._record_tool_metric(mock_db, mock_tool, start_time, success, error_message) + + # Verify ToolMetric was created with error data + MockToolMetric.assert_called_once_with( + tool_id=mock_tool.id, + response_time=2.5, + is_success=False, + error_message="Connection timeout" + ) 
+ + mock_db.add.assert_called_once_with(mock_metric_instance) + mock_db.commit.assert_called_once() + + async def test_aggregate_metrics(self, tool_service): + """Test aggregating metrics across all tools.""" + # Mock database + mock_db = MagicMock() + + # Create a mock that returns scalar values + mock_execute_result = MagicMock() + mock_execute_result.scalar.side_effect = [ + 10, # total count + 8, # successful count + 2, # failed count + 0.5, # min response time + 5.0, # max response time + 2.3, # avg response time + "2025-01-10T12:00:00" # last execution time + ] + mock_db.execute.return_value = mock_execute_result + + result = await tool_service.aggregate_metrics(mock_db) + + assert result == { + "total_executions": 10, + "successful_executions": 8, + "failed_executions": 2, + "failure_rate": 0.2, # 2/10 + "min_response_time": 0.5, + "max_response_time": 5.0, + "avg_response_time": 2.3, + "last_execution_time": "2025-01-10T12:00:00" + } + + # Verify all expected queries were made + assert mock_db.execute.call_count == 7 + + async def test_aggregate_metrics_no_data(self, tool_service): + """Test aggregating metrics when no data exists.""" + # Mock database + mock_db = MagicMock() + + # Create a mock that returns scalar values + mock_execute_result = MagicMock() + mock_execute_result.scalar.side_effect = [ + 0, # total count + 0, # successful count + 0, # failed count + None, # min response time + None, # max response time + None, # avg response time + None # last execution time + ] + mock_db.execute.return_value = mock_execute_result + + result = await tool_service.aggregate_metrics(mock_db) + + assert result == { + "total_executions": 0, + "successful_executions": 0, + "failed_executions": 0, + "failure_rate": 0.0, + "min_response_time": None, + "max_response_time": None, + "avg_response_time": None, + "last_execution_time": None + } + + async def test_validate_tool_url_success(self, tool_service): + """Test successful tool URL validation.""" + # Mock 
successful HTTP response + mock_response = MagicMock() + mock_response.raise_for_status = MagicMock() + tool_service._http_client.get.return_value = mock_response + + # Should not raise any exception + await tool_service._validate_tool_url("http://example.com/tool") + + tool_service._http_client.get.assert_called_once_with("http://example.com/tool") + mock_response.raise_for_status.assert_called_once() + + async def test_validate_tool_url_failure(self, tool_service): + """Test tool URL validation failure.""" + # Mock HTTP error + tool_service._http_client.get.side_effect = Exception("Connection refused") + + with pytest.raises(ToolValidationError, match="Failed to validate tool URL: Connection refused"): + await tool_service._validate_tool_url("http://example.com/tool") + + async def test_check_tool_health_success(self, tool_service, mock_tool): + """Test successful tool health check.""" + mock_response = MagicMock() + mock_response.is_success = True + tool_service._http_client.get.return_value = mock_response + + result = await tool_service._check_tool_health(mock_tool) + + assert result is True + tool_service._http_client.get.assert_called_once_with(mock_tool.url) + + async def test_check_tool_health_failure(self, tool_service, mock_tool): + """Test failed tool health check.""" + mock_response = MagicMock() + mock_response.is_success = False + tool_service._http_client.get.return_value = mock_response + + result = await tool_service._check_tool_health(mock_tool) + + assert result is False + + async def test_check_tool_health_exception(self, tool_service, mock_tool): + """Test tool health check with exception.""" + tool_service._http_client.get.side_effect = Exception("Network error") + + result = await tool_service._check_tool_health(mock_tool) + + assert result is False + + async def test_subscribe_events(self, tool_service): + """Test event subscription mechanism.""" + # Create an event to publish + test_event = {"type": "test_event", "data": {"id": 1}} + + # 
Start subscription in background + subscriber = tool_service.subscribe_events() + subscription_task = asyncio.create_task(subscriber.__anext__()) + + # Give a moment for subscription to be registered + await asyncio.sleep(0.01) + + # Publish event + await tool_service._publish_event(test_event) + + # Get the event + received_event = await subscription_task + assert received_event == test_event + + # Clean up + await subscriber.aclose() + + async def test_notify_tool_added(self, tool_service, mock_tool): + """Test notification when tool is added.""" + with patch.object(tool_service, '_publish_event', new_callable=AsyncMock) as mock_publish: + await tool_service._notify_tool_added(mock_tool) + + mock_publish.assert_called_once() + event = mock_publish.call_args[0][0] + assert event["type"] == "tool_added" + assert event["data"]["id"] == mock_tool.id + assert event["data"]["name"] == mock_tool.name + + async def test_notify_tool_removed(self, tool_service, mock_tool): + """Test notification when tool is removed.""" + with patch.object(tool_service, '_publish_event', new_callable=AsyncMock) as mock_publish: + await tool_service._notify_tool_removed(mock_tool) + + mock_publish.assert_called_once() + event = mock_publish.call_args[0][0] + assert event["type"] == "tool_removed" + assert event["data"]["id"] == mock_tool.id