@@ -943,10 +943,34 @@ The streamable HTTP transport supports:
943
943
944
944
### Mounting to an Existing ASGI Server
945
945
946
- > **Note**: SSE transport is being superseded by [Streamable HTTP transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http).
947
-
948
946
By default, SSE servers are mounted at `/sse` and Streamable HTTP servers are mounted at `/mcp`. You can customize these paths using the methods described below.
949
947
948
+ #### Streamable HTTP servers
949
+
950
+ The following example shows how to use `streamable_http_app()`, a method that returns a `Starlette` application object.
951
+ You can then append additional routes to that application as needed.
952
+
953
+ ``` python
954
+ mcp = FastMCP(" My App" )
955
+
956
+ app = mcp.streamable_http_app()
957
+ # Additional non-MCP routes can be added like so:
958
+ # from starlette.routing import Route
959
+ # app.router.routes.append(Route("/", endpoint=other_route_function))
960
+ ```
961
+
962
+ To customize the route from the default of "/mcp", either specify the `streamable_http_path` option for the `FastMCP` constructor,
963
+ or set the `FASTMCP_STREAMABLE_HTTP_PATH` environment variable.
964
+
965
+ Note that in Starlette and FastAPI (which is based on Starlette), the "/mcp" route will redirect to "/mcp/",
966
+ so you may need to use "/mcp/" when pointing MCP clients at your servers.
967
+
968
+ For more information on mounting applications in Starlette, see the [Starlette documentation](https://www.starlette.io/routing/#submounting-routes).
969
+
970
+ #### SSE servers
971
+
972
+ > **Note**: SSE transport is being superseded by [Streamable HTTP transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http).
973
+
950
974
You can mount the SSE server to an existing ASGI server using the `sse_app` method. This allows you to integrate the SSE server with other ASGI applications.
951
975
952
976
``` python
@@ -1013,17 +1037,44 @@ For more information on mounting applications in Starlette, see the [Starlette d
1013
1037
1014
1038
For more control, you can use the low-level server implementation directly. This gives you full access to the protocol and allows you to customize every aspect of your server, including lifecycle management through the lifespan API:
1015
1039
1040
+ <!-- snippet-source examples/snippets/servers/lowlevel/lifespan.py -->
1016
1041
``` python
1017
- from contextlib import asynccontextmanager
1042
+ """
1043
+ Run from the repository root:
1044
+ uv run examples/snippets/servers/lowlevel/lifespan.py
1045
+ """
1046
+
1018
1047
from collections.abc import AsyncIterator
1048
+ from contextlib import asynccontextmanager
1049
+
1050
+ import mcp.server.stdio
1051
+ import mcp.types as types
1052
+ from mcp.server.lowlevel import NotificationOptions, Server
1053
+ from mcp.server.models import InitializationOptions
1054
+
1055
+
1056
+ # Mock database class for example
1057
+ class Database :
1058
+ """ Mock database class for example."""
1019
1059
1020
- from fake_database import Database # Replace with your actual DB type
1060
+ @ classmethod
1061
+ async def connect (cls ) -> " Database" :
1062
+ """ Connect to database."""
1063
+ print (" Database connected" )
1064
+ return cls ()
1021
1065
1022
- from mcp.server import Server
1066
+ async def disconnect (self ) -> None :
1067
+ """ Disconnect from database."""
1068
+ print (" Database disconnected" )
1069
+
1070
+ async def query (self , query_str : str ) -> list[dict[str , str ]]:
1071
+ """ Execute a query."""
1072
+ # Simulate database query
1073
+ return [{" id" : " 1" , " name" : " Example" , " query" : query_str}]
1023
1074
1024
1075
1025
1076
@asynccontextmanager
1026
- async def server_lifespan (server : Server) -> AsyncIterator[dict ]:
1077
+ async def server_lifespan (_server : Server) -> AsyncIterator[dict ]:
1027
1078
""" Manage server startup and shutdown lifecycle."""
1028
1079
# Initialize resources on startup
1029
1080
db = await Database.connect()
@@ -1038,21 +1089,79 @@ async def server_lifespan(server: Server) -> AsyncIterator[dict]:
1038
1089
server = Server(" example-server" , lifespan = server_lifespan)
1039
1090
1040
1091
1041
- # Access lifespan context in handlers
1092
+ @server.list_tools ()
1093
+ async def handle_list_tools () -> list[types.Tool]:
1094
+ """ List available tools."""
1095
+ return [
1096
+ types.Tool(
1097
+ name = " query_db" ,
1098
+ description = " Query the database" ,
1099
+ inputSchema = {
1100
+ " type" : " object" ,
1101
+ " properties" : {" query" : {" type" : " string" , " description" : " SQL query to execute" }},
1102
+ " required" : [" query" ],
1103
+ },
1104
+ )
1105
+ ]
1106
+
1107
+
1042
1108
@server.call_tool ()
1043
- async def query_db (name : str , arguments : dict ) -> list :
1109
+ async def query_db (name : str , arguments : dict ) -> list[types.TextContent]:
1110
+ """ Handle database query tool call."""
1111
+ if name != " query_db" :
1112
+ raise ValueError (f " Unknown tool: { name} " )
1113
+
1114
+ # Access lifespan context
1044
1115
ctx = server.request_context
1045
1116
db = ctx.lifespan_context[" db" ]
1046
- return await db.query(arguments[" query" ])
1117
+
1118
+ # Execute query
1119
+ results = await db.query(arguments[" query" ])
1120
+
1121
+ return [types.TextContent(type = " text" , text = f " Query results: { results} " )]
1122
+
1123
+
1124
+ async def run ():
1125
+ """ Run the server with lifespan management."""
1126
+ async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
1127
+ await server.run(
1128
+ read_stream,
1129
+ write_stream,
1130
+ InitializationOptions(
1131
+ server_name = " example-server" ,
1132
+ server_version = " 0.1.0" ,
1133
+ capabilities = server.get_capabilities(
1134
+ notification_options = NotificationOptions(),
1135
+ experimental_capabilities = {},
1136
+ ),
1137
+ ),
1138
+ )
1139
+
1140
+
1141
+ if __name__ == " __main__" :
1142
+ import asyncio
1143
+
1144
+ asyncio.run(run())
1047
1145
```
1048
1146
1147
+ _Full example: [examples/snippets/servers/lowlevel/lifespan.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/lifespan.py)_
1148
+ <!-- /snippet-source -->
1149
+
1049
1150
The lifespan API provides:
1050
1151
1051
1152
- A way to initialize resources when the server starts and clean them up when it stops
1052
1153
- Access to initialized resources through the request context in handlers
1053
1154
- Type-safe context passing between lifespan and request handlers
1054
1155
1156
+ <!-- snippet-source examples/snippets/servers/lowlevel/basic.py -->
1055
1157
``` python
1158
+ """
1159
+ Run from the repository root:
1160
+ uv run examples/snippets/servers/lowlevel/basic.py
1161
+ """
1162
+
1163
+ import asyncio
1164
+
1056
1165
import mcp.server.stdio
1057
1166
import mcp.types as types
1058
1167
from mcp.server.lowlevel import NotificationOptions, Server
@@ -1064,38 +1173,37 @@ server = Server("example-server")
1064
1173
1065
1174
@server.list_prompts ()
1066
1175
async def handle_list_prompts () -> list[types.Prompt]:
1176
+ """ List available prompts."""
1067
1177
return [
1068
1178
types.Prompt(
1069
1179
name = " example-prompt" ,
1070
1180
description = " An example prompt template" ,
1071
- arguments = [
1072
- types.PromptArgument(
1073
- name = " arg1" , description = " Example argument" , required = True
1074
- )
1075
- ],
1181
+ arguments = [types.PromptArgument(name = " arg1" , description = " Example argument" , required = True )],
1076
1182
)
1077
1183
]
1078
1184
1079
1185
1080
1186
@server.get_prompt ()
1081
- async def handle_get_prompt (
1082
- name : str , arguments : dict[str , str ] | None
1083
- ) -> types.GetPromptResult:
1187
+ async def handle_get_prompt (name : str , arguments : dict[str , str ] | None ) -> types.GetPromptResult:
1188
+ """ Get a specific prompt by name."""
1084
1189
if name != " example-prompt" :
1085
1190
raise ValueError (f " Unknown prompt: { name} " )
1086
1191
1192
+ arg1_value = (arguments or {}).get(" arg1" , " default" )
1193
+
1087
1194
return types.GetPromptResult(
1088
1195
description = " Example prompt" ,
1089
1196
messages = [
1090
1197
types.PromptMessage(
1091
1198
role = " user" ,
1092
- content = types.TextContent(type = " text" , text = " Example prompt text" ),
1199
+ content = types.TextContent(type = " text" , text = f " Example prompt text with argument: { arg1_value } " ),
1093
1200
)
1094
1201
],
1095
1202
)
1096
1203
1097
1204
1098
1205
async def run ():
1206
+ """ Run the basic low-level server."""
1099
1207
async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
1100
1208
await server.run(
1101
1209
read_stream,
@@ -1112,67 +1220,108 @@ async def run():
1112
1220
1113
1221
1114
1222
if __name__ == " __main__" :
1115
- import asyncio
1116
-
1117
1223
asyncio.run(run())
1118
1224
```
1119
1225
1226
+ _Full example: [examples/snippets/servers/lowlevel/basic.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/basic.py)_
1227
+ <!-- /snippet-source -->
1228
+
1120
1229
Caution: The `uv run mcp run` and `uv run mcp dev` tools don't support low-level servers.
1121
1230
1122
1231
#### Structured Output Support
1123
1232
1124
1233
The low-level server supports structured output for tools, allowing you to return both human-readable content and machine-readable structured data. Tools can define an `outputSchema` to validate their structured output:
1125
1234
1235
+ <!-- snippet-source examples/snippets/servers/lowlevel/structured_output.py -->
1126
1236
``` python
1127
- from types import Any
1237
+ """
1238
+ Run from the repository root:
1239
+ uv run examples/snippets/servers/lowlevel/structured_output.py
1240
+ """
1241
+
1242
+ import asyncio
1243
+ from typing import Any
1128
1244
1245
+ import mcp.server.stdio
1129
1246
import mcp.types as types
1130
- from mcp.server.lowlevel import Server
1247
+ from mcp.server.lowlevel import NotificationOptions, Server
1248
+ from mcp.server.models import InitializationOptions
1131
1249
1132
1250
server = Server(" example-server" )
1133
1251
1134
1252
1135
1253
@server.list_tools ()
1136
1254
async def list_tools () -> list[types.Tool]:
1255
+ """ List available tools with structured output schemas."""
1137
1256
return [
1138
1257
types.Tool(
1139
- name = " calculate " ,
1140
- description = " Perform mathematical calculations " ,
1258
+ name = " get_weather " ,
1259
+ description = " Get current weather for a city " ,
1141
1260
inputSchema = {
1142
1261
" type" : " object" ,
1143
- " properties" : {
1144
- " expression" : {" type" : " string" , " description" : " Math expression" }
1145
- },
1146
- " required" : [" expression" ],
1262
+ " properties" : {" city" : {" type" : " string" , " description" : " City name" }},
1263
+ " required" : [" city" ],
1147
1264
},
1148
1265
outputSchema = {
1149
1266
" type" : " object" ,
1150
1267
" properties" : {
1151
- " result" : {" type" : " number" },
1152
- " expression" : {" type" : " string" },
1268
+ " temperature" : {" type" : " number" , " description" : " Temperature in Celsius" },
1269
+ " condition" : {" type" : " string" , " description" : " Weather condition" },
1270
+ " humidity" : {" type" : " number" , " description" : " Humidity percentage" },
1271
+ " city" : {" type" : " string" , " description" : " City name" },
1153
1272
},
1154
- " required" : [" result " , " expression " ],
1273
+ " required" : [" temperature " , " condition " , " humidity " , " city " ],
1155
1274
},
1156
1275
)
1157
1276
]
1158
1277
1159
1278
1160
1279
@server.call_tool ()
1161
1280
async def call_tool (name : str , arguments : dict[str , Any]) -> dict[str , Any]:
1162
- if name == " calculate" :
1163
- expression = arguments[" expression" ]
1164
- try :
1165
- result = eval (expression) # Use a safe math parser
1166
- structured = {" result" : result, " expression" : expression}
1167
-
1168
- # low-level server will validate structured output against the tool's
1169
- # output schema, and automatically serialize it into a TextContent block
1170
- # for backwards compatibility with pre-2025-06-18 clients.
1171
- return structured
1172
- except Exception as e:
1173
- raise ValueError (f " Calculation error: { str (e)} " )
1281
+ """ Handle tool calls with structured output."""
1282
+ if name == " get_weather" :
1283
+ city = arguments[" city" ]
1284
+
1285
+ # Simulated weather data - in production, call a weather API
1286
+ weather_data = {
1287
+ " temperature" : 22.5 ,
1288
+ " condition" : " partly cloudy" ,
1289
+ " humidity" : 65 ,
1290
+ " city" : city, # Include the requested city
1291
+ }
1292
+
1293
+ # low-level server will validate structured output against the tool's
1294
+ # output schema, and additionally serialize it into a TextContent block
1295
+ # for backwards compatibility with pre-2025-06-18 clients.
1296
+ return weather_data
1297
+ else :
1298
+ raise ValueError (f " Unknown tool: { name} " )
1299
+
1300
+
1301
+ async def run ():
1302
+ """ Run the structured output server."""
1303
+ async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
1304
+ await server.run(
1305
+ read_stream,
1306
+ write_stream,
1307
+ InitializationOptions(
1308
+ server_name = " structured-output-example" ,
1309
+ server_version = " 0.1.0" ,
1310
+ capabilities = server.get_capabilities(
1311
+ notification_options = NotificationOptions(),
1312
+ experimental_capabilities = {},
1313
+ ),
1314
+ ),
1315
+ )
1316
+
1317
+
1318
+ if __name__ == " __main__" :
1319
+ asyncio.run(run())
1174
1320
```
1175
1321
1322
+ _Full example: [examples/snippets/servers/lowlevel/structured_output.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/structured_output.py)_
1323
+ <!-- /snippet-source -->
1324
+
1176
1325
Tools can return data in three ways:
1177
1326
1178
1327
1. **Content only**: Return a list of content blocks (default behavior before spec revision 2025-06-18)
0 commit comments