Skip to content

Commit f70b50e

Browse files
fixing error
Signed-off-by: IrushaBasukala <[email protected]>
1 parent 1d7bafa commit f70b50e

File tree

13 files changed

+406
-97
lines changed

13 files changed

+406
-97
lines changed

MANIFEST.in

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ include Containerfile.lite
1313
include __init__
1414
include alembic.ini
1515
include tox.ini
16+
include alembic/README
1617

1718
# 2️⃣ Top-level config, examples and helper scripts
1819
include *.py

mcpgateway/admin.py

Lines changed: 25 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3658,25 +3658,45 @@ async def admin_delete_root(uri: str, request: Request, user: str = Depends(requ
36583658

36593659
# # Return actual Pydantic model instances
36603660
# return {
3661-
# "tools": tool_metrics,
3661+
# "tools": tool_metrics,
36623662
# "resources": resource_metrics,
36633663
# "servers": server_metrics,
36643664
# "prompts": prompt_metrics,
36653665
# }
36663666

3667+
36673668
@admin_router.get("/metrics")
36683669
async def get_aggregated_metrics(db: Session = Depends(get_db)) -> Dict[str, Any]:
3670+
"""Retrieve aggregated metrics and top performers for all entity types.
3671+
3672+
This endpoint collects usage metrics and top-performing entities for tools,
3673+
resources, prompts, and servers by calling the respective service methods.
3674+
The results are compiled into a dictionary for administrative monitoring.
3675+
3676+
Args:
3677+
db (Session): Database session dependency for querying metrics.
3678+
3679+
Returns:
3680+
Dict[str, Any]: A dictionary containing aggregated metrics and top performers
3681+
for tools, resources, prompts, and servers. The structure includes:
3682+
- 'tools': Metrics for tools.
3683+
- 'resources': Metrics for resources.
3684+
- 'prompts': Metrics for prompts.
3685+
- 'servers': Metrics for servers.
3686+
- 'topPerformers': A nested dictionary with top 5 tools, resources, prompts,
3687+
and servers.
3688+
"""
36693689
metrics = {
3670-
"tools": await tool_service.aggregate_metrics(db),
3690+
"tools": await tool_service.aggregate_metrics(db),
36713691
"resources": await resource_service.aggregate_metrics(db),
36723692
"prompts": await prompt_service.aggregate_metrics(db),
3673-
"servers": await server_service.aggregate_metrics(db),
3693+
"servers": await server_service.aggregate_metrics(db),
36743694
"topPerformers": {
36753695
"tools": await tool_service.get_top_tools(db, limit=5),
36763696
"resources": await resource_service.get_top_resources(db, limit=5),
36773697
"prompts": await prompt_service.get_top_prompts(db, limit=5),
3678-
"servers": await server_service.get_top_servers(db, limit=5)
3679-
}
3698+
"servers": await server_service.get_top_servers(db, limit=5),
3699+
},
36803700
}
36813701
return metrics
36823702

mcpgateway/alembic/README.md

Lines changed: 172 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,172 @@
1+
# Alembic Migration Guide for `mcpgateway`
2+
3+
> Creating, applying, and managing schema migrations with Alembic.
4+
5+
---
6+
7+
## Table of Contents
8+
9+
1. [Why Alembic?](#why-alembic)
10+
2. [Prerequisites](#prerequisites)
11+
3. [Directory Layout](#directory-layout)
12+
4. [Everyday Workflow](#everyday-workflow)
13+
5. [Helpful Make Targets](#helpful-make-targets)
14+
6. [Troubleshooting](#troubleshooting)
15+
7. [Further Reading](#further-reading)
16+
17+
---
18+
19+
## Why Alembic?
20+
21+
- **Versioned DDL** - Revisions are timestamped, diff-able, and reversible.
22+
- **Autogeneration** - Detects model vs. DB drift and writes `op.create_table`, `op.add_column`, etc.
23+
- **Multi-DB Support** - Works with SQLite, PostgreSQL, MySQL — anything SQLAlchemy supports.
24+
- **Zero Runtime Cost** - Only runs when you call it (dev, CI, deploy).
25+
26+
---
27+
28+
## Prerequisites
29+
30+
```bash
31+
# Activate your virtual environment first
32+
pip install --upgrade alembic
33+
```
34+
35+
You do not need to set up `alembic.ini`, `env.py`, or metadata wiring - they're already configured.
36+
37+
---
38+
39+
## Directory Layout
40+
41+
```
42+
alembic.ini
43+
alembic/
44+
├── env.py
45+
├── script.py.mako
46+
└── versions/
47+
├── 20250626235501_initial_schema.py
48+
└── ...
49+
```
50+
51+
* `alembic.ini`: Configuration file
52+
* `env.py`: Connects Alembic to your models and DB settings
53+
* `script.py.mako`: Template for new revisions (keep this!)
54+
* `versions/`: Contains all migration scripts
55+
56+
---
57+
58+
## Everyday Workflow
59+
60+
> **1 Edit → 2 Revision → 3 Upgrade**
61+
62+
| Step | What you do |
63+
| ------------------------ | ----------------------------------------------------------------------------- |
64+
| **1. Change models** | Modify SQLAlchemy models in `mcpgateway.db` or its submodules. |
65+
| **2. Generate revision** | Run: `MSG="add users table"` then `alembic revision --autogenerate -m "$MSG"` |
66+
| **3. Review** | Open the new file in `alembic/versions/`. Verify the operations are correct. |
67+
| **4. Upgrade DB** | Run: `alembic upgrade head` |
68+
| **5. Commit** | Run: `git add alembic/versions/*.py` |
69+
70+
### Other Common Commands
71+
72+
```bash
73+
alembic -c mcpgateway/alembic.ini current # Show current DB revision
74+
alembic history --verbose # Show all migrations and their order
75+
alembic downgrade -1 # Roll back one revision
76+
alembic downgrade <rev> # Roll back to a specific revision hash
77+
```
78+
79+
---
80+
81+
## ✅ Make Targets: Alembic Migration Commands
82+
83+
These targets help you manage database schema migrations using Alembic.
84+
85+
> You must have a valid `alembic/` setup and a working SQLAlchemy model base (`Base.metadata`).
86+
87+
---
88+
89+
### 💡 List all available targets (with help)
90+
91+
```bash
92+
make help
93+
```
94+
95+
This will include the Alembic section:
96+
97+
```
98+
# 🛢️ Alembic tasks
99+
db-new Autogenerate revision (MSG="title")
100+
db-up Upgrade DB to head
101+
db-down Downgrade one step (REV=-1 or hash)
102+
db-current Show current DB revision
103+
db-history List the migration graph
104+
```
105+
106+
---
107+
108+
### 🔨 Commands
109+
110+
| Command | Description |
111+
| -------------------------- | ------------------------------------------------------ |
112+
| `make db-new MSG="..."` | Generate a new migration based on model changes. |
113+
| `make db-up` | Apply all unapplied migrations. |
114+
| `make db-down` | Roll back the latest migration (`REV=-1` by default). |
115+
| `make db-down REV=abc1234` | Roll back to a specific revision by hash. |
116+
| `make db-current` | Print the current revision ID applied to the database. |
117+
| `make db-history` | Show the full migration history and graph. |
118+
119+
---
120+
121+
### 📌 Examples
122+
123+
```bash
124+
# Create a new migration with a custom message
125+
make db-new MSG="add users table"
126+
127+
# Apply it to the database
128+
make db-up
129+
130+
# Downgrade the last migration
131+
make db-down
132+
133+
# Downgrade to a specific revision
134+
make db-down REV=cf1283d7fa92
135+
136+
# Show the current applied revision
137+
make db-current
138+
139+
# Show all migration history
140+
make db-history
141+
```
142+
143+
---
144+
145+
### 🛑 Notes
146+
147+
* You must **edit models** before running `make db-new`, or the generated revision will be empty.
148+
* Always **review generated migration files** before committing.
149+
* Don't forget to run `make db-up` on CI or deploy if using migrations to manage schema.
150+
151+
---
152+
153+
## Troubleshooting
154+
155+
| Symptom | Cause / Fix |
156+
| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
157+
| **Empty migration (`pass`)** | Alembic couldn't detect models. Make sure all model classes are imported before `Base.metadata` is used (already handled in your `env.py`). |
158+
| **`Can't locate revision ...`** | You deleted or renamed a revision file that the DB is pointing to. Either restore it or run `alembic stamp base` and recreate the revision. |
159+
| **`script.py.mako` missing** | This file is required. Run `alembic init alembic` in a temp folder and copy the missing template into your project. |
160+
| **SQLite foreign key limitations** | SQLite doesn't allow dropping constraints. Use `create table → copy → drop` flow manually, or plan around it. |
161+
| **DB not updating** | Did you forget to run `alembic upgrade head`? Check with `alembic -c mcpgateway/alembic.ini current`. |
162+
| **Wrong DB URL or config errors** | Confirm `settings.database_url` is valid. Check `env.py` and your `.env`/config settings. Alembic ignores `alembic.ini` for URLs in your setup. |
163+
| **Model changes not detected** | Alembic only picks up declarative models in `Base.metadata`. Ensure all models are imported and not behind `if TYPE_CHECKING:` or other lazy imports. |
164+
165+
---
166+
167+
## Further Reading
168+
169+
* Official docs: [https://alembic.sqlalchemy.org](https://alembic.sqlalchemy.org)
170+
* Autogenerate docs: [https://alembic.sqlalchemy.org/en/latest/autogenerate.html](https://alembic.sqlalchemy.org/en/latest/autogenerate.html)
171+
172+
---

mcpgateway/alembic/script.py.mako

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
"""${message}
2+
3+
Revision ID: ${up_revision}
4+
Revises: ${down_revision | comma,n}
5+
Create Date: ${create_date}
6+
7+
"""
8+
from typing import Sequence, Union
9+
10+
from alembic import op
11+
import sqlalchemy as sa
12+
${imports if imports else ""}
13+
14+
# revision identifiers, used by Alembic.
15+
revision: str = ${repr(up_revision)}
16+
down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)}
17+
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
18+
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
19+
20+
21+
def upgrade() -> None:
22+
"""Upgrade schema."""
23+
${upgrades if upgrades else "pass"}
24+
25+
26+
def downgrade() -> None:
27+
"""Downgrade schema."""
28+
${downgrades if downgrades else "pass"}

mcpgateway/handlers/sampling.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -219,7 +219,7 @@ async def create_message(self, db: Session, request: Dict[str, Any]) -> CreateMe
219219
# TODO: Sample from selected model
220220
# For now return mock response
221221
response = self._mock_sample(messages=messages)
222-
222+
223223
# Convert to result
224224
return CreateMessageResult(
225225
content=TextContent(type="text", text=response),

mcpgateway/migrations/env.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,8 @@
1+
"""Alembic environment configuration for database migrations.
2+
3+
This module sets up the Alembic migration environment, configuring the database connection
4+
and metadata for running migrations in both online and offline modes.
5+
"""
16
from logging.config import fileConfig
27

38
from sqlalchemy import engine_from_config
@@ -73,6 +78,7 @@ def run_migrations_online() -> None:
7378

7479

7580
if context.is_offline_mode():
81+
7682
run_migrations_offline()
7783
else:
7884
run_migrations_online()

mcpgateway/schemas.py

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2571,10 +2571,22 @@ class GatewayTestResponse(BaseModelWithConfigDict):
25712571

25722572

25732573
class TopPerformer(BaseModelWithConfigDict):
2574+
"""Schema for representing top-performing entities with performance metrics.
2575+
2576+
Used to encapsulate metrics for entities such as prompts, resources, servers, or tools,
2577+
including execution count, average response time, success rate, and last execution timestamp.
2578+
2579+
Attributes:
2580+
id (Union[str, int]): Unique identifier for the entity.
2581+
name (str): Name of the entity (e.g., prompt name, resource URI, server name, or tool name).
2582+
execution_count (int): Total number of executions for the entity.
2583+
avg_response_time (Optional[float]): Average response time in seconds, or None if no metrics.
2584+
success_rate (Optional[float]): Success rate percentage, or None if no metrics.
2585+
last_execution (Optional[datetime]): Timestamp of the last execution, or None if no metrics.
2586+
"""
25742587
id: Union[str, int] = Field(..., description="Entity ID")
25752588
name: str = Field(..., description="Entity name")
25762589
execution_count: int = Field(..., description="Number of executions")
25772590
avg_response_time: Optional[float] = Field(None, description="Average response time in seconds")
25782591
success_rate: Optional[float] = Field(None, description="Success rate percentage")
25792592
last_execution: Optional[datetime] = Field(None, description="Timestamp of last execution")
2580-

mcpgateway/services/gateway_service.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -176,9 +176,6 @@ class GatewayService:
176176
- Active/inactive status management
177177
"""
178178

179-
180-
181-
182179
def __init__(self) -> None:
183180
"""Initialize the gateway service.
184181

mcpgateway/services/prompt_service.py

Lines changed: 37 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -133,22 +133,41 @@ async def shutdown(self) -> None:
133133
self._event_subscribers.clear()
134134
logger.info("Prompt service shutdown complete")
135135

136-
137136
async def get_top_prompts(self, db: Session, limit: int = 5) -> List[TopPerformer]:
138-
results = db.query(
139-
DbPrompt.id,
140-
DbPrompt.name,
141-
func.count(PromptMetric.id).label('execution_count'),
142-
func.avg(PromptMetric.response_time).label('avg_response_time'),
143-
(func.sum(case((PromptMetric.is_success, 1), else_=0)) / func.count(PromptMetric.id) * 100).label('success_rate'),
144-
func.max(PromptMetric.timestamp).label('last_execution')
145-
).outerjoin(
146-
PromptMetric
147-
).group_by(
148-
DbPrompt.id, DbPrompt.name
149-
).order_by(
150-
desc('execution_count')
151-
).limit(limit).all()
137+
"""Retrieve the top-performing prompts based on execution count.
138+
139+
Queries the database to get prompts with their metrics, ordered by the number of executions
140+
in descending order. Returns a list of TopPerformer objects containing prompt details and
141+
performance metrics.
142+
143+
Args:
144+
db (Session): Database session for querying prompt metrics.
145+
limit (int, optional): Maximum number of prompts to return. Defaults to 5.
146+
147+
Returns:
148+
List[TopPerformer]: A list of TopPerformer objects, each containing:
149+
- id: Prompt ID.
150+
- name: Prompt name.
151+
- execution_count: Total number of executions.
152+
- avg_response_time: Average response time in seconds, or None if no metrics.
153+
- success_rate: Success rate percentage, or None if no metrics.
154+
- last_execution: Timestamp of the last execution, or None if no metrics.
155+
"""
156+
results = (
157+
db.query(
158+
DbPrompt.id,
159+
DbPrompt.name,
160+
func.count(PromptMetric.id).label("execution_count"),# pylint: disable=not-callable
161+
func.avg(PromptMetric.response_time).label("avg_response_time"),
162+
(func.sum(case((PromptMetric.is_success, 1), else_=0)) / func.count(PromptMetric.id) * 100).label("success_rate"),
163+
func.max(PromptMetric.timestamp).label("last_execution"),
164+
)
165+
.outerjoin(PromptMetric)
166+
.group_by(DbPrompt.id, DbPrompt.name)
167+
.order_by(desc("execution_count"))
168+
.limit(limit)
169+
.all()
170+
)
152171

153172
return [
154173
TopPerformer(
@@ -157,8 +176,9 @@ async def get_top_prompts(self, db: Session, limit: int = 5) -> List[TopPerforme
157176
execution_count=result.execution_count or 0,
158177
avg_response_time=float(result.avg_response_time) if result.avg_response_time else None,
159178
success_rate=float(result.success_rate) if result.success_rate else None,
160-
last_execution=result.last_execution
161-
) for result in results
179+
last_execution=result.last_execution,
180+
)
181+
for result in results
162182
]
163183

164184
def _convert_db_prompt(self, db_prompt: DbPrompt) -> Dict[str, Any]:

0 commit comments

Comments
 (0)