Describe the bug
There is a 401 error between the client and the server when the MCP server restarts while the client is maintaining an existing connection. I tracked it down to this check in the local session manager implementation:
```rust
if !has_session {
    // unauthorized
    return Ok(Response::builder()
        .status(http::StatusCode::UNAUTHORIZED)
        .body(Full::new(Bytes::from("Unauthorized: Session not found")).boxed())
        .expect("valid response"));
}
```
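As I read the Streamable HTTP spec, a request carrying an unknown or expired Mcp-Session-Id should get a 404 Not Found back, which is the client's cue to drop the id and start a new session with a fresh InitializeRequest. A minimal sketch of what that variant of the check might look like (my assumption of spec-conforming behavior, not current rmcp code):

```rust
if !has_session {
    // Per the MCP Streamable HTTP spec, an unknown/expired session id
    // should yield 404 so the client knows to re-initialize.
    return Ok(Response::builder()
        .status(http::StatusCode::NOT_FOUND)
        .body(Full::new(Bytes::from("Session not found")).boxed())
        .expect("valid response"));
}
```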
I'm using the StreamableHttpClient, but it looks like it reuses some of the SSE implementation for managing sessions?
To Reproduce
Steps to reproduce the behavior:
1. Create an MCP streamable HTTP server and a streamable HTTP MCP client.
2. Connect the client to the server on localhost.
3. Restart the MCP server.
4. 401 errors will appear in the logs.
client:
```rust
let transport =
    StreamableHttpClientTransport::from_config(StreamableHttpClientTransportConfig {
        uri: secrets.mcp_http_uri.into(),
        retry_config: Arc::new(ExponentialBackoff::default()),
        channel_buffer_capacity: 16,
        allow_stateless: true,
        auth_header: Option::from(secrets.api_key.clone()),
    });

let client_info = ClientInfo {
    protocol_version: Default::default(),
    capabilities: ClientCapabilities::default(),
    client_info: Implementation {
        name: "client".to_string(),
        title: Option::from("MCP server".to_string()),
        version: "0.0.1".to_string(),
        icons: None,
        website_url: None,
    },
};

Arc::new(
    client_info
        .serve(transport)
        .await
        .inspect_err(|e| {
            tracing::error!("client error: {e:?}");
        })
        .expect("Failed to create MCP client"),
)
```
server:
```rust
let service = StreamableHttpService::new(
    || Ok(tools::home::Home::new()),
    // sessions live in process memory, so they are lost on restart
    LocalSessionManager::default().into(),
    Default::default(),
);

let router = Router::new()
    .nest_service("/mcp", service)
    .layer(CorsLayer::permissive())
    .layer(TraceLayer::new_for_http());

let protected = router.layer(middleware::from_fn_with_state(
    token_store.clone(),
    nova_utils::token_auth::middleware,
));

let api_routes = Router::new().route("/health", get(health_check));

let app = Router::new()
    .nest("/api", api_routes)
    .merge(protected)
    .with_state(())
    .layer(CorsLayer::permissive())
    .layer(TraceLayer::new_for_http());

let listener = tokio::net::TcpListener::bind(config.get_addr()).await?;

let cancel_token = CancellationToken::new();
let ct = cancel_token.clone();
tokio::spawn(async move {
    match tokio::signal::ctrl_c().await {
        Ok(()) => {
            println!("Received Ctrl+C, shutting down server...");
            cancel_token.cancel();
        }
        Err(err) => {
            eprintln!("Unable to listen for Ctrl+C signal: {}", err);
        }
    }
});
let service = ct.clone();

tracing::info!("Server started on {}", config.get_addr());
let server = axum::serve(listener, app).with_graceful_shutdown(async move {
    ct.cancelled().await;
    println!("Server is shutting down...");
});

if let Err(e) = server.await {
    eprintln!("Server error: {}", e);
}

println!("Server has been shut down");
service.cancel();
Ok(())
```
Expected behavior
A new session would be created during the retry mechanism in the SseAutoReconnectStreamState.
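At the protocol level, "create a new session" just means POSTing a fresh initialize request without an Mcp-Session-Id header and reading the new id off the response. A raw sketch of that handshake, assuming reqwest (with the json feature) and serde_json rather than rmcp's internals:

```rust
// Hypothetical re-initialize against the endpoint from the repro above.
// The Streamable HTTP spec requires clients to accept both JSON and SSE.
let res = reqwest::Client::new()
    .post("http://localhost:8000/mcp")
    .header("Accept", "application/json, text/event-stream")
    .json(&serde_json::json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "initialize",
        "params": {
            "protocolVersion": "2025-03-26",
            "capabilities": {},
            "clientInfo": { "name": "client", "version": "0.0.1" }
        }
    }))
    .send()
    .await?;
// The server assigns the new session in this response header; subsequent
// requests must echo it back.
let new_session_id = res.headers().get("Mcp-Session-Id");
```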
Logs
server
```
2025-11-24T23:58:38.609687Z DEBUG request{method=GET uri=/mcp version=HTTP/1.1}: tower_http::trace::on_request: started processing request
2025-11-24T23:58:38.610130Z DEBUG request{method=GET uri=/mcp version=HTTP/1.1}:request{method=GET uri=/mcp version=HTTP/1.1}: tower_http::trace::on_request: started processing request
2025-11-24T23:58:44.754526Z DEBUG request{method=GET uri=/mcp version=HTTP/1.1}:request{method=GET uri=/mcp version=HTTP/1.1}: tower_http::trace::on_response: finished processing request latency=6144 ms status=401
2025-11-24T23:58:44.754635Z DEBUG request{method=GET uri=/mcp version=HTTP/1.1}: tower_http::trace::on_response: finished processing request latency=6145 ms status=401
2025-11-24T23:58:57.877055Z DEBUG request{method=POST uri=/mcp version=HTTP/1.1}: tower_http::trace::on_request: started processing request
2025-11-24T23:58:57.877318Z DEBUG request{method=POST uri=/mcp version=HTTP/1.1}:request{method=POST uri=/mcp version=HTTP/1.1}: tower_http::trace::on_request: started processing request
2025-11-24T23:58:57.878839Z DEBUG request{method=POST uri=/mcp version=HTTP/1.1}:request{method=POST uri=/mcp version=HTTP/1.1}: tower_http::trace::on_response: finished processing request latency=1 ms status=401
2025-11-24T23:58:57.878917Z DEBUG request{method=POST uri=/mcp version=HTTP/1.1}: tower_http::trace::on_response: finished processing request latency=2 ms status=401
2025-11-24T23:59:00.759253Z DEBUG request{method=GET uri=/mcp version=HTTP/1.1}: tower_http::trace::on_request: started processing request
2025-11-24T23:59:00.759490Z DEBUG request{method=GET uri=/mcp version=HTTP/1.1}:request{method=GET uri=/mcp version=HTTP/1.1}: tower_http::trace::on_request: started processing request
2025-11-24T23:59:00.759628Z DEBUG request{method=GET uri=/mcp version=HTTP/1.1}:request{method=GET uri=/mcp version=HTTP/1.1}: tower_http::trace::on_response: finished processing request latency=0 ms status=401
2025-11-24T23:59:00.759675Z DEBUG request{method=GET uri=/mcp version=HTTP/1.1}: tower_http::trace::on_response: finished processing request latency=0 ms status=401
```
client
```
2025-11-24T23:59:00.757581Z DEBUG hyper_util::client::legacy::pool: reuse idle connection for ("http", localhost:8000)
2025-11-24T23:59:00.760302Z DEBUG hyper_util::client::legacy::pool: pooling idle connection for ("http", localhost:8000)
2025-11-24T23:59:00.760398Z DEBUG rmcp::transport::common::client_side_sse: retry sse stream error: Client error: HTTP status client error (401 Unauthorized) for url (http://localhost:8000/mcp)
```
Additional context
If this isn't a bug and is expected behavior, what is the recommended way to re-create long-lived client sessions to the MCP server when the server needs to be updated or restarted?
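For now the only workaround I can see is to rebuild the transport (dropping the stale session id) and redo the handshake whenever the client errors out. A rough sketch reusing the identifiers from the client snippet above (the retry loop is my own, not an rmcp API):

```rust
// Hypothetical retry loop around the client setup shown earlier. Each
// attempt rebuilds the transport so no stale session id from the old
// server process is reused; backoff is a fixed 2s here for simplicity.
let client = loop {
    let transport =
        StreamableHttpClientTransport::from_config(StreamableHttpClientTransportConfig {
            uri: secrets.mcp_http_uri.clone().into(),
            retry_config: Arc::new(ExponentialBackoff::default()),
            channel_buffer_capacity: 16,
            allow_stateless: true,
            auth_header: Option::from(secrets.api_key.clone()),
        });
    match client_info.clone().serve(transport).await {
        Ok(running) => break Arc::new(running),
        Err(e) => {
            tracing::warn!("MCP connect failed, retrying in 2s: {e:?}");
            tokio::time::sleep(std::time::Duration::from_secs(2)).await;
        }
    }
};
```

Detecting the failure on an already-established client so this loop can be re-run is the part I don't see a hook for, hence the question.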