Skip to content

Commit 3dea77f

Browse files
committed
limiter: Short-circuit when no rates or connections are configured
Every Teleport service creates a Limiter unconditionally, even when no `connection_limits` are set. Previously, `NewRateLimiter` injected a fake default rate of 100M req/s to satisfy a non-empty-rates invariant in `TokenLimiter`. Every request still paid the full cost: extract client IP, acquire mutex, FnCache lookup, and token bucket consume - all for a rate that never rejects. Remove the fake default rate and add early returns throughout the limiter stack: `RateLimiter.RegisterRequest` returns immediately when the effective rate set is empty, `TokenLimiter.ServeHTTP` skips IP extraction and rate consumption, and `ConnectionsLimiter` skips tracking when `maxConnections` is zero. Micro-benchmarks on Apple M4 Pro show the noop path is 5-160x faster in isolation (NoLimits = noop after fix, HighDefaultRate = old behavior with fake 100M rate): BenchmarkRegisterRequest/NoRates 1.85 ns/op 0 B/op 0 allocs/op BenchmarkRegisterRequest/HighDefaultRate 306 ns/op 64 B/op 3 allocs/op BenchmarkHTTPMiddleware/NoLimits 82.0 ns/op 208 B/op 4 allocs/op BenchmarkHTTPMiddleware/HighDefaultRate 405 ns/op 264 B/op 7 allocs/op BenchmarkRegisterRequestAndConnection/NoLimits 18.8 ns/op 32 B/op 1 allocs/op BenchmarkRegisterRequestAndConnection/HighDefaultRate 323 ns/op 96 B/op 4 allocs/op BenchmarkUnaryInterceptor/NoLimits 18.5 ns/op 0 B/op 0 allocs/op BenchmarkUnaryInterceptor/HighDefaultRate 322 ns/op 64 B/op 3 allocs/op BenchmarkStreamInterceptor/NoLimits 33.2 ns/op 32 B/op 1 allocs/op BenchmarkStreamInterceptor/HighDefaultRate 337 ns/op 96 B/op 4 allocs/op However, a macro-benchmark through a real TCP HTTP server shows no measurable difference in end-to-end throughput. 
The limiter's ~300ns is noise against the ~9500ns of TCP/HTTP overhead per request: BenchmarkHTTPServer/NoLimits 9510 ns/op 4681 B/op 54 allocs/op BenchmarkHTTPServer/HighDefaultRate 9600 ns/op 4752 B/op 57 allocs/op. This change is primarily about code clarity — removing the fake default-rate workaround and making the limiter honestly do nothing when no limits are configured — rather than about a meaningful throughput improvement.
1 parent 434696d commit 3dea77f

File tree

5 files changed

+225
-37
lines changed

5 files changed

+225
-37
lines changed

lib/limiter/connlimiter.go

Lines changed: 16 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -41,8 +41,9 @@ type ConnectionsLimiter struct {
4141
connections map[string]int64
4242
}
4343

44-
// NewConnectionsLimiter returns new connection limiter, in case if connection
45-
// limits are not set, they won't be tracked
44+
// NewConnectionsLimiter returns a new connection limiter. If
45+
// maxConnections is zero, the limiter performs no tracking and
46+
// all requests pass through.
4647
func NewConnectionsLimiter(maxConnections int64) *ConnectionsLimiter {
4748
return &ConnectionsLimiter{
4849
maxConnections: maxConnections,
@@ -63,6 +64,11 @@ func (l *ConnectionsLimiter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
6364
return
6465
}
6566

67+
if l.maxConnections == 0 {
68+
l.next.ServeHTTP(w, r)
69+
return
70+
}
71+
6672
clientIP, err := ratelimit.ExtractClientIP(r)
6773
if err != nil {
6874
l.log.WarnContext(context.Background(), "failed to extract source IP", "remote_addr", r.RemoteAddr)
@@ -82,7 +88,9 @@ func (l *ConnectionsLimiter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
8288
l.next.ServeHTTP(w, r)
8389
}
8490

85-
// AcquireConnection acquires connection and bumps counter
91+
// AcquireConnection increments the connection count for the
92+
// given token. If maxConnections is zero, it returns nil
93+
// immediately without tracking.
8694
func (l *ConnectionsLimiter) AcquireConnection(token string) error {
8795
if l.maxConnections == 0 {
8896
return nil
@@ -105,7 +113,8 @@ func (l *ConnectionsLimiter) AcquireConnection(token string) error {
105113
return nil
106114
}
107115

108-
// ReleaseConnection decrements the counter
116+
// ReleaseConnection decrements the connection count for the
117+
// given token. If maxConnections is zero, it returns immediately.
109118
func (l *ConnectionsLimiter) ReleaseConnection(token string) {
110119
if l.maxConnections == 0 {
111120
return
@@ -126,7 +135,9 @@ func (l *ConnectionsLimiter) ReleaseConnection(token string) {
126135
}
127136
}
128137

129-
// GetNumConnection returns the current number of connections for a token
138+
// GetNumConnection returns the current connection count for the
139+
// given token. If maxConnections is zero, it returns 0 because
140+
// connections are not tracked.
130141
func (l *ConnectionsLimiter) GetNumConnection(token string) (int64, error) {
131142
if l.maxConnections == 0 {
132143
return 0, nil

lib/limiter/internal/ratelimit/ratelimit.go

Lines changed: 31 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -47,8 +47,8 @@ type TokenLimiterConfig struct {
4747
}
4848

4949
func (t *TokenLimiterConfig) CheckAndSetDefaults() error {
50-
if len(t.Rates.m) == 0 {
51-
return trace.BadParameter("missing required rates")
50+
if t.Rates == nil {
51+
t.Rates = &RateSet{m: make(map[time.Duration]*rate)}
5252
}
5353

5454
if t.Clock == nil {
@@ -71,17 +71,22 @@ func New(config TokenLimiterConfig) (*TokenLimiter, error) {
7171
log: slog.With(teleport.ComponentKey, "ratelimiter"),
7272
}
7373

74-
bucketSets, err := utils.NewFnCache(utils.FnCacheConfig{
75-
// The default TTL here is not super important because we set the
76-
// TTL explicitly for each entry we insert.
77-
TTL: 10 * time.Second,
78-
Clock: config.Clock,
79-
})
80-
if err != nil {
81-
return nil, trace.Wrap(err)
74+
// Skip FnCache creation when no rates are configured. FnCache
75+
// spawns a background goroutine for cleanup, and the noop path
76+
// in ServeHTTP never touches bucketSets.
77+
if config.Rates.Len() > 0 {
78+
bucketSets, err := utils.NewFnCache(utils.FnCacheConfig{
79+
// The default TTL here is not super important because we
80+
// set the TTL explicitly for each entry we insert.
81+
TTL: 10 * time.Second,
82+
Clock: config.Clock,
83+
})
84+
if err != nil {
85+
return nil, trace.Wrap(err)
86+
}
87+
tl.bucketSets = bucketSets
8288
}
8389

84-
tl.bucketSets = bucketSets
8590
return tl, nil
8691
}
8792

@@ -90,6 +95,11 @@ func (tl *TokenLimiter) Wrap(next http.Handler) {
9095
}
9196

9297
func (tl *TokenLimiter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
98+
if tl.defaultRates.Len() == 0 {
99+
tl.next.ServeHTTP(w, req)
100+
return
101+
}
102+
93103
clientIP, err := ExtractClientIP(req)
94104
if err != nil {
95105
ServeHTTPError(w, req, err)
@@ -146,7 +156,7 @@ type rate struct {
146156
burst int64
147157
}
148158

149-
// NewRateSet crates an empty rate set.
159+
// NewRateSet creates an empty rate set.
150160
func NewRateSet() *RateSet {
151161
return &RateSet{m: make(map[time.Duration]*rate)}
152162
}
@@ -167,6 +177,15 @@ func (rs *RateSet) Add(period time.Duration, average int64, burst int64) error {
167177
return nil
168178
}
169179

180+
// Len returns the number of rates in the set. It is safe to call on a
181+
// nil receiver.
182+
func (rs *RateSet) Len() int {
183+
if rs == nil {
184+
return 0
185+
}
186+
return len(rs.m)
187+
}
188+
170189
// MaxPeriod returns the maximum period in the rate set.
171190
func (rs *RateSet) MaxPeriod() time.Duration {
172191
var result time.Duration

lib/limiter/limiter.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -113,7 +113,7 @@ func (l *Limiter) RegisterRequestAndConnection(token string) (func(), error) {
113113

114114
type RateSet = ratelimit.RateSet
115115

116-
// NewRateSet crates an empty `RateSet` instance.
116+
// NewRateSet creates an empty RateSet.
117117
func NewRateSet() *RateSet { return ratelimit.NewRateSet() }
118118

119119
// UnaryServerInterceptor returns a gRPC unary interceptor which

lib/limiter/limiter_test.go

Lines changed: 163 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -460,6 +460,169 @@ func mustServeAndReceiveStatusCode(t *testing.T, handler http.Handler, wantStatu
460460
require.Equal(t, wantStatusCode, response.StatusCode)
461461
}
462462

463+
func TestNoRates_RegisterRequest(t *testing.T) {
464+
t.Parallel()
465+
466+
limiter, err := NewLimiter(Config{})
467+
require.NoError(t, err)
468+
469+
// With no rates configured, RegisterRequest should never reject.
470+
for range 100 {
471+
require.NoError(t, limiter.RegisterRequest("token1"))
472+
}
473+
}
474+
475+
func TestNoRates_Middleware(t *testing.T) {
476+
t.Parallel()
477+
478+
limiter, err := NewLimiter(Config{})
479+
require.NoError(t, err)
480+
481+
middleware := MakeMiddleware(limiter)
482+
handler := middleware(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
483+
w.WriteHeader(http.StatusAccepted)
484+
}))
485+
486+
// With no rates and no max connections, every request should pass through.
487+
for range 100 {
488+
mustServeAndReceiveStatusCode(t, handler, http.StatusAccepted)
489+
}
490+
}
491+
492+
func TestNoRates_CustomRateStillApplied(t *testing.T) {
493+
t.Parallel()
494+
495+
clock := clockwork.NewFakeClock()
496+
limiter, err := NewLimiter(Config{Clock: clock})
497+
require.NoError(t, err)
498+
499+
customRate := ratelimit.NewRateSet()
500+
err = customRate.Add(time.Minute, 1, 2)
501+
require.NoError(t, err)
502+
503+
// Custom rate should still be enforced even without default rates.
504+
require.NoError(t, limiter.RegisterRequestWithCustomRate("token1", customRate))
505+
require.NoError(t, limiter.RegisterRequestWithCustomRate("token1", customRate))
506+
require.Error(t, limiter.RegisterRequestWithCustomRate("token1", customRate))
507+
}
508+
509+
func TestNoRates_IsRateLimited(t *testing.T) {
510+
t.Parallel()
511+
512+
limiter, err := NewRateLimiter(Config{})
513+
require.NoError(t, err)
514+
515+
// With no rates configured, IsRateLimited should always return false.
516+
require.False(t, limiter.IsRateLimited("token1"))
517+
518+
// RegisterRequest is a no-op, so even after many calls the token
519+
// should not appear rate-limited.
520+
for range 100 {
521+
require.NoError(t, limiter.RegisterRequest("token1", nil))
522+
}
523+
require.False(t, limiter.IsRateLimited("token1"))
524+
}
525+
526+
func TestNoRates_UnaryServerInterceptor(t *testing.T) {
527+
t.Parallel()
528+
529+
limiter, err := NewLimiter(Config{})
530+
require.NoError(t, err)
531+
532+
ctx := peer.NewContext(t.Context(), &peer.Peer{Addr: mockAddr{}})
533+
serverInfo := &grpc.UnaryServerInfo{FullMethod: "/method"}
534+
handler := func(context.Context, any) (any, error) { return "ok", nil }
535+
536+
interceptor := limiter.UnaryServerInterceptor()
537+
538+
// With no rates, the interceptor should never reject.
539+
for range 100 {
540+
resp, err := interceptor(ctx, "request", serverInfo, handler)
541+
require.NoError(t, err)
542+
require.Equal(t, "ok", resp)
543+
}
544+
}
545+
546+
func TestNoRates_StreamServerInterceptor(t *testing.T) {
547+
t.Parallel()
548+
549+
limiter, err := NewLimiter(Config{})
550+
require.NoError(t, err)
551+
552+
ctx := peer.NewContext(t.Context(), &peer.Peer{Addr: mockAddr{}})
553+
ss := mockServerStream{ctx: ctx}
554+
info := &grpc.StreamServerInfo{}
555+
handler := func(srv any, stream grpc.ServerStream) error { return nil }
556+
557+
// With no rates, the interceptor should never reject.
558+
for range 100 {
559+
require.NoError(t, limiter.StreamServerInterceptor(nil, ss, info, handler))
560+
}
561+
}
562+
563+
func TestNoRates_RegisterRequestAndConnection(t *testing.T) {
564+
t.Parallel()
565+
566+
limiter, err := NewLimiter(Config{})
567+
require.NoError(t, err)
568+
569+
// With no rates and no max connections, should never reject.
570+
for range 100 {
571+
release, err := limiter.RegisterRequestAndConnection("127.0.0.1")
572+
require.NoError(t, err)
573+
release()
574+
}
575+
}
576+
577+
func TestNoRates_WithMaxConnections(t *testing.T) {
578+
t.Parallel()
579+
580+
limiter, err := NewLimiter(Config{MaxConnections: 2})
581+
require.NoError(t, err)
582+
583+
// Rate limiting should pass through (no rates), but connection
584+
// limiting should still enforce.
585+
for range 100 {
586+
require.NoError(t, limiter.RegisterRequest("token1"))
587+
}
588+
589+
// Connection limit should still work.
590+
release1, err := limiter.RegisterRequestAndConnection("127.0.0.1")
591+
require.NoError(t, err)
592+
release2, err := limiter.RegisterRequestAndConnection("127.0.0.1")
593+
require.NoError(t, err)
594+
595+
// Third connection should be rejected.
596+
_, err = limiter.RegisterRequestAndConnection("127.0.0.1")
597+
require.Error(t, err)
598+
require.True(t, trace.IsLimitExceeded(err))
599+
600+
// Release one, then the third should succeed.
601+
release1()
602+
release3, err := limiter.RegisterRequestAndConnection("127.0.0.1")
603+
require.NoError(t, err)
604+
release2()
605+
release3()
606+
}
607+
608+
func TestNoRates_MiddlewareWithMaxConnections(t *testing.T) {
609+
t.Parallel()
610+
611+
limiter, err := NewLimiter(Config{MaxConnections: 1})
612+
require.NoError(t, err)
613+
614+
// Verify rate limiting passes through in HTTP path by sending
615+
// many sequential requests (no concurrent connections).
616+
middleware := MakeMiddleware(limiter)
617+
handler := middleware(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
618+
w.WriteHeader(http.StatusAccepted)
619+
}))
620+
621+
for range 100 {
622+
mustServeAndReceiveStatusCode(t, handler, http.StatusAccepted)
623+
}
624+
}
625+
463626
func TestRateLimiter_IsRateLimited(t *testing.T) {
464627
t.Parallel()
465628

lib/limiter/ratelimiter.go

Lines changed: 14 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -32,12 +32,6 @@ import (
3232
"github.com/gravitational/teleport/lib/utils"
3333
)
3434

35-
const (
36-
// defaultRate is the maximum number of requests per second that the limiter
37-
// will allow when no rate limits are configured
38-
defaultRate = 100_000_000
39-
)
40-
4135
// RateLimiter controls connection rate using the token bucket algorithm.
4236
// See: https://en.wikipedia.org/wiki/Token_bucket
4337
type RateLimiter struct {
@@ -66,12 +60,6 @@ func NewRateLimiter(config Config) (*RateLimiter, error) {
6660
return nil, trace.Wrap(err)
6761
}
6862
}
69-
if len(config.Rates) == 0 {
70-
err := limiter.rates.Add(time.Second, defaultRate, defaultRate)
71-
if err != nil {
72-
return nil, trace.Wrap(err)
73-
}
74-
}
7563

7664
if config.Clock == nil {
7765
config.Clock = clockwork.NewRealClock()
@@ -89,8 +77,8 @@ func NewRateLimiter(config Config) (*RateLimiter, error) {
8977
}
9078

9179
limiter.rateLimits, err = utils.NewFnCache(utils.FnCacheConfig{
92-
// The default TTL here is not super important because we set the
93-
// TTL explicitly for each entry we insert.
80+
// The default TTL here is not super important because we set
81+
// the TTL explicitly for each entry we insert.
9482
TTL: 10 * time.Second,
9583
Clock: config.Clock,
9684
})
@@ -118,14 +106,21 @@ func (l *RateLimiter) IsRateLimited(token string) bool {
118106
return bucket.IsRateLimited()
119107
}
120108

121-
// RegisterRequest increases number of requests for the provided token
122-
// Returns error if there are too many requests with the provided token.
109+
// RegisterRequest increases number of requests for the provided token.
110+
// If neither the default rates nor a custom rate are configured, the
111+
// request passes through without any limiting.
123112
func (l *RateLimiter) RegisterRequest(token string, customRate *ratelimit.RateSet) error {
113+
// cmp.Or returns the first non-zero value. A non-nil RateSet
114+
// with zero entries is still selected over l.rates, which is
115+
// fine because Len() == 0 catches that case below.
116+
rate := cmp.Or(customRate, l.rates)
117+
if rate.Len() == 0 {
118+
return nil
119+
}
120+
124121
l.mu.Lock()
125122
defer l.mu.Unlock()
126123

127-
rate := cmp.Or(customRate, l.rates)
128-
129124
// We set the TTL as 10 times the rate period. E.g. if rate is 100 requests/second
130125
// per client IP, the counters for this IP will expire after 10 seconds of inactivity.
131126
ttl := rate.MaxPeriod()*10 + 1
@@ -150,7 +145,7 @@ func (l *RateLimiter) RegisterRequest(token string, customRate *ratelimit.RateSe
150145
return nil
151146
}
152147

153-
// Add rate limiter to the handle
148+
// WrapHandle wraps the given HTTP handler with the rate limiter.
154149
func (l *RateLimiter) WrapHandle(h http.Handler) {
155150
l.TokenLimiter.Wrap(h)
156151
}

0 commit comments

Comments
 (0)