34 |  34 |       create_engine,
35 |  35 |       func,
36 |  36 |       select,
   |  37 | +     make_url,
37 |  38 |   )
38 |  39 |   from sqlalchemy.event import listen
39 |  40 |   from sqlalchemy.exc import SQLAlchemyError

49 |  50 |   from mcpgateway.config import settings
50 |  51 |   from mcpgateway.types import ResourceContent
51 |  52 |
52 |     | - base_connect_args = {}
53 |     | - # Add the SQLite-only flag when appropriate
54 |     | - if settings.database_url.startswith("sqlite"):
55 |     | -     base_connect_args["check_same_thread"] = False
56 |     | - elif settings.database_url.startswith("postgres"):
57 |     | -     base_connect_args = {
58 |     | -         "keepalives": 1,
59 |     | -         "keepalives_idle": 30,
60 |     | -         "keepalives_interval": 5,
61 |     | -         "keepalives_count": 5,
62 |     | -     }
63 |     | -
64 |     | - # Create SQLAlchemy engine with connection pooling
   |  53 | +
   |  54 | + # ---------------------------------------------------------------------------
   |  55 | + # 1. Parse the URL so we can inspect backend ("postgresql", "sqlite", …)
   |  56 | + #    and the specific driver ("psycopg2", "asyncpg", empty string = default).
   |  57 | + # ---------------------------------------------------------------------------
   |  58 | + url = make_url(settings.database_url)
   |  59 | + backend = url.get_backend_name()  # e.g. 'postgresql', 'sqlite'
   |  60 | + driver = url.get_driver_name() or "default"
   |  61 | +
   |  62 | + # Start with an empty dict and add options only when the driver can accept
   |  63 | + # them; this prevents unexpected TypeError at connect time.
   |  64 | + connect_args: dict[str, object] = {}
   |  65 | +
   |  66 | + # ---------------------------------------------------------------------------
   |  67 | + # 2. PostgreSQL (synchronous psycopg2 only)
   |  68 | + #    The keep-alive parameters below are recognised exclusively by libpq /
   |  69 | + #    psycopg2 and let the kernel detect broken network links quickly.
   |  70 | + # ---------------------------------------------------------------------------
   |  71 | + if backend == "postgresql" and driver in ("psycopg2", "default", ""):
   |  72 | +     connect_args.update(
   |  73 | +         keepalives=1,           # enable TCP keep-alive probes
   |  74 | +         keepalives_idle=30,     # seconds of idleness before first probe
   |  75 | +         keepalives_interval=5,  # seconds between probes
   |  76 | +         keepalives_count=5,     # drop the link after N failed probes
   |  77 | +     )
   |  78 | +
   |  79 | + # ---------------------------------------------------------------------------
   |  80 | + # 3. SQLite (optional) – only one extra flag and it is *SQLite-specific*.
   |  81 | + # ---------------------------------------------------------------------------
   |  82 | + elif backend == "sqlite":
   |  83 | +     # Allow pooled connections to hop across threads.
   |  84 | +     connect_args["check_same_thread"] = False
   |  85 | +
   |  86 | + # 4. Other backends (MySQL, MSSQL, etc.) leave `connect_args` empty.
   |  87 | +
   |  88 | + # ---------------------------------------------------------------------------
   |  89 | + # 5. Build the Engine with a single, clean connect_args mapping.
   |  90 | + # ---------------------------------------------------------------------------
65 |  91 |   engine = create_engine(
66 |  92 |       settings.database_url,
67 |     | -     pool_pre_ping=True,
   |  93 | +     pool_pre_ping=True,  # quick liveness check per checkout
68 |  94 |       pool_size=settings.db_pool_size,
69 |  95 |       max_overflow=settings.db_max_overflow,
70 |  96 |       pool_timeout=settings.db_pool_timeout,
71 |  97 |       pool_recycle=settings.db_pool_recycle,
72 |     | -     connect_args=base_connect_args
   |  98 | +     connect_args=connect_args,
73 |  99 |   )
74 | 100 |
75 | 101 |   # Session factory
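
As a quick sanity check of the driver-detection branch introduced above, the standalone snippet below (not part of the commit; the URLs are made-up examples) shows what `make_url` reports for a few typical connection strings and which `connect_args` branch each would take. Note that for a bare `postgresql://` URL, `get_driver_name()` resolves to the dialect's default driver, so the psycopg2 keep-alive options still apply, while a `postgresql+asyncpg://` URL falls through and gets no libpq-only options.

# Sketch only: hypothetical example URLs, mirroring the branch logic above.
from sqlalchemy import make_url

EXAMPLES = (
    "sqlite:///./mcp.db",                           # backend 'sqlite', default driver 'pysqlite'
    "postgresql://mcp:secret@db:5432/mcp",          # backend 'postgresql', default driver 'psycopg2'
    "postgresql+asyncpg://mcp:secret@db:5432/mcp",  # backend 'postgresql', driver 'asyncpg'
)

for raw in EXAMPLES:
    url = make_url(raw)
    backend = url.get_backend_name()
    driver = url.get_driver_name() or "default"
    if backend == "postgresql" and driver in ("psycopg2", "default", ""):
        branch = "libpq keep-alive options"
    elif backend == "sqlite":
        branch = "check_same_thread=False"
    else:
        branch = "no extra connect_args"
    print(f"{raw:45} backend={backend:11} driver={driver:8} -> {branch}")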
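
For context on the `# Session factory` line the hunk ends on, here is a minimal sketch of the kind of factory typically bound to this engine. The names `SessionLocal` and `get_db` are illustrative assumptions, not necessarily what the module actually defines below this hunk.

# Illustrative only; the real factory follows later in the same module and may
# differ. `engine` is the object created by the diff above.
from sqlalchemy.orm import sessionmaker

SessionLocal = sessionmaker(bind=engine, autoflush=False, expire_on_commit=False)

def get_db():
    """Yield a session per request/task and always return it to the pool."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()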