From 22154137a79fb9e6cc5f3c8751acc09188c257eb Mon Sep 17 00:00:00 2001
From: Rob Findley
Date: Tue, 16 Sep 2025 18:56:28 +0000
Subject: [PATCH] mcp: add a benchmark for the MemoryEventStore; remove
 validation

Add a new benchmark measuring the append and purge performance of the
MemoryEventStore. This benchmark revealed that, due to its conservative
invariant checking, the store is orders of magnitude slower than it
should be (roughly 300KB/s vs 568MB/s). Turn off this validation by
default.

For #190
---
 mcp/event.go      |  4 ++--
 mcp/event_test.go | 44 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 46 insertions(+), 2 deletions(-)

diff --git a/mcp/event.go b/mcp/event.go
index 0dd8734b..d309c4e0 100644
--- a/mcp/event.go
+++ b/mcp/event.go
@@ -23,8 +23,8 @@ import (
 )
 
 // If true, MemoryEventStore will do frequent validation to check invariants, slowing it down.
-// Remove when we're confident in the code.
-const validateMemoryEventStore = true
+// Enable for debugging.
+const validateMemoryEventStore = false
 
 // An Event is a server-sent event.
 // See https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#fields.
diff --git a/mcp/event_test.go b/mcp/event_test.go
index 601e8300..20808c73 100644
--- a/mcp/event_test.go
+++ b/mcp/event_test.go
@@ -10,6 +10,7 @@ import (
 	"slices"
 	"strings"
 	"testing"
+	"time"
 )
 
 func TestScanEvents(t *testing.T) {
@@ -252,3 +253,46 @@ func TestMemoryEventStoreAfter(t *testing.T) {
 		})
 	}
 }
+
+func BenchmarkMemoryEventStore(b *testing.B) {
+	// Benchmark with various settings for event store size, number of sessions,
+	// and payload size.
+	//
+	// Assume a small number of streams per session, which is probably realistic.
+	tests := []struct {
+		name     string
+		limit    int
+		sessions int
+		datasize int
+	}{
+		{"1KB", 1024, 1, 16},
+		{"1MB", 1024 * 1024, 10, 16},
+		{"10MB", 10 * 1024 * 1024, 100, 16},
+		{"10MB_big", 10 * 1024 * 1024, 1000, 128},
+	}
+
+	for _, test := range tests {
+		b.Run(test.name, func(b *testing.B) {
+			store := NewMemoryEventStore(nil)
+			store.SetMaxBytes(test.limit)
+			ctx := context.Background()
+			sessionIDs := make([]string, test.sessions)
+			streamIDs := make([][3]StreamID, test.sessions)
+			for i := range sessionIDs {
+				sessionIDs[i] = fmt.Sprint(i)
+				for j := range 3 {
+					streamIDs[i][j] = StreamID(randText())
+				}
+			}
+			payload := make([]byte, test.datasize)
+			start := time.Now()
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				sessionID := sessionIDs[i%len(sessionIDs)]
+				streamID := streamIDs[i%len(sessionIDs)][i%3]
+				store.Append(ctx, sessionID, streamID, payload)
+			}
+			b.ReportMetric(float64(test.datasize)*float64(b.N)/time.Since(start).Seconds(), "bytes/s")
+		})
+	}
+}
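
Note: the new benchmark can be run with the standard Go test tooling from the
repository root; a typical invocation (exact flags up to the reader) is
something like:

    go test ./mcp -run '^$' -bench BenchmarkMemoryEventStore -benchmem

The absolute bytes/s numbers reported by the custom metric will vary by
machine; the 300KB/s vs 568MB/s comparison above came from running the same
benchmark with validateMemoryEventStore set to true and then false in
mcp/event.go, which remains the way to re-enable the invariant checks when
debugging.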