Commit 2bf5582

fix(mapsutil): data race in SyncLockMap cleanup (#705)
* fix(mapsutil): data race in `SyncLockMap` cleanup

  The race was detected when multiple goroutines concurrently called
  `(*SyncLockMap[K, V]).Get()` or `(*SyncLockMap[K, V]).Set()`, each
  triggering `(*SyncLockMap[K, V]).triggerCleanupIfNeeded()`, which had
  unsynchronized access to the [time.Time] field.

  Fixes #704

  Signed-off-by: Dwi Siswanto <git@dw1.io>

* test(mapsutil): add concurrent tests

  Signed-off-by: Dwi Siswanto <git@dw1.io>

---------

Signed-off-by: Dwi Siswanto <git@dw1.io>
1 parent 5314f45 commit 2bf5582
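
The race the commit message describes boils down to an unguarded read-modify-write of a `time.Time` field from many goroutines. The following minimal reproducer is a hypothetical sketch (illustrative names, not the library's actual code) of the pattern that `go run -race` or `go test -race` flags:

package main

import (
	"sync"
	"time"
)

// cleaner mimics the reported pattern: lastCleanup is read and written
// concurrently without holding mu (hypothetical type, not SyncLockMap).
type cleaner struct {
	mu          sync.RWMutex
	lastCleanup time.Time // accessed without mu below -> data race
	interval    time.Duration
}

func (c *cleaner) trigger() {
	now := time.Now()
	if now.Sub(c.lastCleanup) < c.interval { // unsynchronized read
		return
	}
	c.lastCleanup = now // unsynchronized write
}

func main() {
	c := &cleaner{interval: time.Nanosecond}
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.trigger() // many goroutines race on lastCleanup
		}()
	}
	wg.Wait()
}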

2 files changed: +325 −8


maps/synclock_map.go

Lines changed: 10 additions & 8 deletions
@@ -71,15 +71,18 @@ func (s *SyncLockMap[K, V]) triggerCleanupIfNeeded() {
 		return
 	}
 
-	// Check if cleanup is needed using instance-specific interval
 	now := time.Now()
-	if now.Sub(s.lastCleanup) < s.cleanupInterval {
-		return
+
+	s.mu.Lock()
+	shouldCleanup := now.Sub(s.lastCleanup) >= s.cleanupInterval
+	if shouldCleanup {
+		s.lastCleanup = now
 	}
+	s.mu.Unlock()
 
-	// Update last cleanup time and trigger async cleanup
-	s.lastCleanup = now
-	go s.evictInactiveEntries()
+	if shouldCleanup {
+		go s.evictInactiveEntries()
+	}
 }
 
 // ForceCleanup forces an immediate cleanup (useful for testing)
@@ -136,8 +139,6 @@ func (s *SyncLockMap[K, V]) Set(k K, v V) error {
 	}
 
 	s.mu.Lock()
-	defer s.mu.Unlock()
-
 	now := time.Now()
 
 	// If eviction is enabled, handle eviction logic
@@ -158,6 +159,7 @@ func (s *SyncLockMap[K, V]) Set(k K, v V) error {
 	}
 
 	s.Map[k] = v
+	s.mu.Unlock()
 
 	// Trigger cleanup if needed
 	s.triggerCleanupIfNeeded()
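
Read together, the hunks above give `triggerCleanupIfNeeded` the following shape (a consolidated sketch; the surrounding lines not shown in the diff are assumed). The read-modify-write of `s.lastCleanup` is now guarded by `s.mu`, and the eviction goroutine is launched only after the mutex is released:

// Sketch assembled from the diff above; context outside the hunks is assumed.
func (s *SyncLockMap[K, V]) triggerCleanupIfNeeded() {
	now := time.Now()

	s.mu.Lock()
	shouldCleanup := now.Sub(s.lastCleanup) >= s.cleanupInterval
	if shouldCleanup {
		s.lastCleanup = now // updated only while the lock is held
	}
	s.mu.Unlock()

	if shouldCleanup {
		// Eviction runs asynchronously, outside the critical section.
		go s.evictInactiveEntries()
	}
}

`Set` follows the same discipline: the `defer s.mu.Unlock()` is replaced by an explicit `s.mu.Unlock()` right after the write to `s.Map`, so that `triggerCleanupIfNeeded` can acquire `s.mu` itself without blocking on a lock `Set` still holds.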

maps/synclock_map_test.go

Lines changed: 315 additions & 0 deletions
@@ -2,6 +2,7 @@ package mapsutil
 
 import (
 	"errors"
+	"sync"
 	"testing"
 	"time"
 
@@ -333,3 +334,317 @@ func TestSyncLockMapWithEviction(t *testing.T) {
 	})
 
 }
+
+func TestSyncLockMapConcurrent(t *testing.T) {
+	const numGoroutines = 100
+	const numIterations = 100
+
+	t.Run("Get", func(t *testing.T) {
+		m := NewSyncLockMap(WithEviction[string, string](1*time.Second, 1*time.Nanosecond))
+
+		err := m.Set("key1", "value1")
+		require.NoError(t, err)
+
+		var wg sync.WaitGroup
+		for range numGoroutines {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				for range numIterations {
+					v, ok := m.Get("key1")
+					require.True(t, ok)
+					require.Equal(t, "value1", v)
+				}
+			}()
+		}
+		wg.Wait()
+	})
+
+	t.Run("Set", func(t *testing.T) {
+		m := NewSyncLockMap[string, string]()
+
+		var wg sync.WaitGroup
+		for i := range numGoroutines {
+			wg.Add(1)
+			go func(i int) {
+				defer wg.Done()
+				for j := range numIterations {
+					key := string(rune('a' + (i+j)%26))
+					err := m.Set(key, "value")
+					require.NoError(t, err)
+				}
+			}(i)
+		}
+		wg.Wait()
+	})
+
+	t.Run("Delete", func(t *testing.T) {
+		m := NewSyncLockMap[string, string]()
+
+		// Populate map
+		for i := range numGoroutines {
+			key := string(rune('a' + i%26))
+			_ = m.Set(key, "value")
+		}
+
+		var wg sync.WaitGroup
+		for i := range numGoroutines {
+			wg.Add(1)
+			go func(i int) {
+				defer wg.Done()
+				for j := range numIterations {
+					key := string(rune('a' + (i+j)%26))
+					m.Delete(key)
+				}
+			}(i)
+		}
+		wg.Wait()
+	})
+
+	t.Run("Has", func(t *testing.T) {
+		m := NewSyncLockMap[string, string]()
+		_ = m.Set("key1", "value1")
+
+		var wg sync.WaitGroup
+		for range numGoroutines {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				for range numIterations {
+					_ = m.Has("key1")
+				}
+			}()
+		}
+		wg.Wait()
+	})
+
+	t.Run("IsEmpty", func(t *testing.T) {
+		m := NewSyncLockMap[string, string]()
+		_ = m.Set("key1", "value1")
+
+		var wg sync.WaitGroup
+		for range numGoroutines {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				for range numIterations {
+					_ = m.IsEmpty()
+				}
+			}()
+		}
+		wg.Wait()
+	})
+
+	t.Run("GetAll", func(t *testing.T) {
+		m := NewSyncLockMap[string, string]()
+		_ = m.Set("key1", "value1")
+		_ = m.Set("key2", "value2")
+
+		var wg sync.WaitGroup
+		for range numGoroutines {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				for range numIterations {
+					all := m.GetAll()
+					require.NotNil(t, all)
+				}
+			}()
+		}
+		wg.Wait()
+	})
+
+	t.Run("GetKeyWithValue", func(t *testing.T) {
+		m := NewSyncLockMap[string, string]()
+		_ = m.Set("key1", "value1")
+
+		var wg sync.WaitGroup
+		for range numGoroutines {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				for range numIterations {
+					key, ok := m.GetKeyWithValue("value1")
+					if ok {
+						require.Equal(t, "key1", key)
+					}
+				}
+			}()
+		}
+		wg.Wait()
+	})
+
+	t.Run("Clone", func(t *testing.T) {
+		m := NewSyncLockMap[string, string]()
+		_ = m.Set("key1", "value1")
+		_ = m.Set("key2", "value2")
+
+		var wg sync.WaitGroup
+		for range numGoroutines {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				for range numIterations {
+					cloned := m.Clone()
+					require.NotNil(t, cloned)
+					require.True(t, cloned.Has("key1"))
+				}
+			}()
+		}
+		wg.Wait()
+	})
+
+	t.Run("Iterate", func(t *testing.T) {
+		m := NewSyncLockMap[string, string]()
+		_ = m.Set("key1", "value1")
+		_ = m.Set("key2", "value2")
+
+		var wg sync.WaitGroup
+		for range numGoroutines {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				for range numIterations {
+					count := 0
+					err := m.Iterate(func(k string, v string) error {
+						count++
+						return nil
+					})
+					require.NoError(t, err)
+				}
+			}()
+		}
+		wg.Wait()
+	})
+
+	t.Run("Lock", func(t *testing.T) {
+		var wg sync.WaitGroup
+		for range numGoroutines {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				// Each goroutine gets its own map to avoid interference
+				m := NewSyncLockMap[string, string]()
+				_ = m.Set("key1", "value1")
+
+				for range numIterations {
+					m.Lock()
+					// When locked, Set should fail
+					err := m.Set("test", "test")
+					require.Error(t, err)
+					require.Equal(t, ErrReadOnly, err)
+					m.Unlock()
+				}
+			}()
+		}
+		wg.Wait()
+	})
+
+	t.Run("Unlock", func(t *testing.T) {
+		var wg sync.WaitGroup
+		for range numGoroutines {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				// Each goroutine gets its own map to avoid interference
+				m := NewSyncLockMap[string, string]()
+				_ = m.Set("key1", "value1")
+
+				for range numIterations {
+					m.Lock()
+					wasReadOnly := m.ReadOnly.Load()
+					m.Unlock()
+					require.True(t, wasReadOnly)
+					// When unlocked, Set should succeed
+					err := m.Set("test", "test")
+					require.NoError(t, err)
+				}
+			}()
+		}
+		wg.Wait()
+	})
+
+	t.Run("Merge", func(t *testing.T) {
+		m := NewSyncLockMap[string, string]()
+
+		var wg sync.WaitGroup
+		for i := range numGoroutines {
+			wg.Add(1)
+			go func(i int) {
+				defer wg.Done()
+				for j := range numIterations {
+					key := string(rune('a' + (i+j)%26))
+					err := m.Merge(map[string]string{key: "value"})
+					require.NoError(t, err)
+				}
+			}(i)
+		}
+		wg.Wait()
+	})
+
+	t.Run("Clear", func(t *testing.T) {
+		m := NewSyncLockMap[string, string]()
+
+		var wg sync.WaitGroup
+		for i := range numGoroutines {
+			wg.Add(1)
+			go func(i int) {
+				defer wg.Done()
+				for j := range numIterations {
+					// Add some items
+					key := string(rune('a' + (i+j)%26))
+					_ = m.Set(key, "value")
+					// Clear occasionally
+					if j%10 == 0 {
+						_ = m.Clear()
+					}
+				}
+			}(i)
+		}
+		wg.Wait()
+	})
+
+	t.Run("CleanupInactiveItems", func(t *testing.T) {
+		m := NewSyncLockMap(WithEviction[string, string](50*time.Millisecond, 1*time.Hour))
+
+		// Populate map
+		for i := range 10 {
+			key := string(rune('a' + i))
+			_ = m.Set(key, "value")
+		}
+
+		var wg sync.WaitGroup
+		for range numGoroutines {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				for range numIterations {
+					m.CleanupInactiveItems()
+				}
+			}()
+		}
+		wg.Wait()
+	})
+
+	t.Run("ForceCleanup", func(t *testing.T) {
+		m := NewSyncLockMap(WithEviction[string, string](50*time.Millisecond, 1*time.Hour))
+
+		// Populate map
+		for i := range 10 {
+			key := string(rune('a' + i))
+			_ = m.Set(key, "value")
+		}
+
+		var wg sync.WaitGroup
+		for range numGoroutines {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				for range numIterations {
+					m.ForceCleanup()
+				}
+			}()
+		}
+		wg.Wait()
+	})
+}
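
The new `TestSyncLockMapConcurrent` hammers every exported `SyncLockMap` method from 100 goroutines running 100 iterations each. On its own it mostly verifies that nothing panics; the regression from #704 only surfaces when the tests are run under the race detector, e.g. `go test -race -run TestSyncLockMapConcurrent ./maps/` (package path assumed from the file location shown above).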
