Skip to content

Commit df1036c

Browse files
authored
feat: Added a generic LRUCache interface and a default implementation (#347)
- Added an interface for the LRU caching mechanism, including a default implementation. - Generics were not used since they were introduced in Go 1.18, whereas we support versions up to Go 1.12.
1 parent abd8d8a commit df1036c

File tree

2 files changed

+301
-0
lines changed

2 files changed

+301
-0
lines changed

pkg/odp/lru_cache.go

Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,112 @@
1+
/****************************************************************************
2+
* Copyright 2022, Optimizely, Inc. and contributors *
3+
* *
4+
* Licensed under the Apache License, Version 2.0 (the "License"); *
5+
* you may not use this file except in compliance with the License. *
6+
* You may obtain a copy of the License at *
7+
* *
8+
* http://www.apache.org/licenses/LICENSE-2.0 *
9+
* *
10+
* Unless required by applicable law or agreed to in writing, software *
11+
* distributed under the License is distributed on an "AS IS" BASIS, *
12+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
13+
* See the License for the specific language governing permissions and *
14+
* limitations under the License. *
15+
***************************************************************************/
16+
17+
// Package odp //
18+
package odp
19+
20+
import (
21+
"container/list"
22+
"sync"
23+
"time"
24+
)
25+
26+
// Cache is used for caching ODP segments. Implementations map string
// keys to arbitrary values; interface{} is used instead of generics
// because the SDK supports Go versions older than 1.18.
type Cache interface {
	// Save stores value under key, inserting or updating as needed.
	Save(key string, value interface{})
	// Lookup returns the value stored under key, or nil when absent
	// (or, for TTL-aware implementations, expired).
	Lookup(key string) interface{}
	// Reset removes all entries from the cache.
	Reset()
}
32+
33+
// cacheElement is the internal record stored for each cached key.
type cacheElement struct {
	data   interface{}   // value supplied by the caller
	time   int64         // Unix time (seconds) when the entry was inserted; used for TTL checks
	keyPtr *list.Element // this entry's key node in the recency queue, for O(1) promotion/removal
}
38+
39+
// LRUCache a Least Recently Used in-memory cache with an optional
// per-entry time-to-live. The zero value is not usable; construct
// instances with NewLRUCache.
type LRUCache struct {
	queue         *list.List               // recency order: front = most recently used; element values are the string keys
	items         map[string]*cacheElement // key -> entry index for O(1) lookup
	maxSize       int                      // maximum number of entries; <= 0 disables the cache entirely
	timeoutInSecs int64                    // entry TTL in seconds; <= 0 means entries never expire
	lock          *sync.RWMutex            // guards queue and items
}
47+
48+
// NewLRUCache returns a new instance of Least Recently Used in-memory cache
49+
func NewLRUCache(size int, timeoutInSecs int64) LRUCache {
50+
return LRUCache{queue: list.New(), items: make(map[string]*cacheElement), maxSize: size, timeoutInSecs: timeoutInSecs, lock: new(sync.RWMutex)}
51+
}
52+
53+
// Save stores a new element into the cache
54+
func (l *LRUCache) Save(key string, value interface{}) {
55+
if l.maxSize <= 0 {
56+
return
57+
}
58+
l.lock.Lock()
59+
defer l.lock.Unlock()
60+
if item, ok := l.items[key]; !ok {
61+
// remove the last object if queue is full
62+
if l.maxSize == len(l.items) {
63+
back := l.queue.Back()
64+
l.queue.Remove(back)
65+
delete(l.items, back.Value.(string))
66+
}
67+
// push the new object to the front of the queue
68+
l.items[key] = &cacheElement{data: value, keyPtr: l.queue.PushFront(key), time: time.Now().Unix()}
69+
} else {
70+
item.data = value
71+
l.items[key] = item
72+
l.queue.MoveToFront(item.keyPtr)
73+
}
74+
}
75+
76+
// Lookup retrieves an element from the cache, reordering the elements
77+
func (l *LRUCache) Lookup(key string) interface{} {
78+
if l.maxSize <= 0 {
79+
return nil
80+
}
81+
l.lock.Lock()
82+
defer l.lock.Unlock()
83+
if item, ok := l.items[key]; ok {
84+
if l.isValid(item) {
85+
l.queue.MoveToFront(item.keyPtr)
86+
return item.data
87+
}
88+
l.queue.Remove(item.keyPtr)
89+
delete(l.items, item.keyPtr.Value.(string))
90+
}
91+
return nil
92+
}
93+
94+
// Reset clears all the elements from the cache
95+
func (l *LRUCache) Reset() {
96+
if l.maxSize <= 0 {
97+
return
98+
}
99+
l.lock.Lock()
100+
defer l.lock.Unlock()
101+
l.queue = list.New()
102+
l.items = make(map[string]*cacheElement)
103+
}
104+
105+
func (l *LRUCache) isValid(e *cacheElement) bool {
106+
if l.timeoutInSecs <= 0 {
107+
return true
108+
}
109+
currenttime := time.Now().Unix()
110+
elapsedtime := currenttime - e.time
111+
return l.timeoutInSecs > elapsedtime
112+
}

pkg/odp/lru_cache_test.go

Lines changed: 189 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,189 @@
1+
package odp
2+
3+
import (
4+
"fmt"
5+
"sync"
6+
"testing"
7+
"time"
8+
9+
"github.com/stretchr/testify/assert"
10+
)
11+
12+
func TestMinConfig(t *testing.T) {
13+
cache := NewLRUCache(1000, 2000)
14+
assert.Equal(t, 1000, cache.maxSize)
15+
assert.Equal(t, int64(2000), cache.timeoutInSecs)
16+
17+
cache = NewLRUCache(0, 0)
18+
assert.Equal(t, 0, cache.maxSize)
19+
assert.Equal(t, int64(0), cache.timeoutInSecs)
20+
}
21+
22+
func TestSaveAndLookupConfig(t *testing.T) {
23+
maxSize := 2
24+
cache := NewLRUCache(maxSize, 1000)
25+
26+
cache.Save("1", 100) // [1]
27+
cache.Save("2", 200) // [1, 2]
28+
cache.Save("3", 300) // [2, 3]
29+
assert.Nil(t, cache.Lookup("1"))
30+
assert.Equal(t, 200, cache.Lookup("2")) // [3, 2]
31+
assert.Equal(t, 300, cache.Lookup("3")) // [2, 3]
32+
33+
cache.Save("2", 201) // [3, 2]
34+
cache.Save("1", 101) // [2, 1]
35+
assert.Equal(t, 101, cache.Lookup("1"))
36+
assert.Equal(t, 201, cache.Lookup("2")) // [1, 2]
37+
assert.Nil(t, cache.Lookup("3"))
38+
39+
cache.Save("3", 302) // [2, 3]
40+
assert.Nil(t, cache.Lookup("1"))
41+
assert.Equal(t, 201, cache.Lookup("2")) // [3, 2]
42+
assert.Equal(t, 302, cache.Lookup("3")) // [2, 3]
43+
44+
cache.Save("1", 103) // [3, 1]
45+
assert.Equal(t, 103, cache.Lookup("1"))
46+
assert.Nil(t, cache.Lookup("2"))
47+
assert.Equal(t, 302, cache.Lookup("3")) // [1, 3]
48+
49+
// Check if old items were deleted
50+
assert.Equal(t, maxSize, cache.queue.Len())
51+
assert.Equal(t, maxSize, len(cache.items))
52+
}
53+
54+
func TestReset(t *testing.T) {
55+
maxSize := 2
56+
cache := NewLRUCache(maxSize, 1000)
57+
58+
cache.Save("1", 100) // [1]
59+
cache.Save("2", 200) // [1, 2]
60+
61+
assert.Equal(t, maxSize, cache.queue.Len())
62+
63+
// cache reset
64+
cache.Reset()
65+
assert.Equal(t, 0, cache.queue.Len())
66+
assert.Equal(t, 0, len(cache.items))
67+
68+
// validate cache fully functional after reset
69+
cache.Save("1", 100) // [1]
70+
cache.Save("2", 200) // [1, 2]
71+
cache.Save("3", 300) // [2, 3]
72+
assert.Nil(t, cache.Lookup("1"))
73+
assert.Equal(t, 200, cache.Lookup("2")) // [3, 2]
74+
assert.Equal(t, 300, cache.Lookup("3")) // [2, 3]
75+
76+
cache.Save("2", 201) // [3, 2]
77+
cache.Save("1", 101) // [2, 1]
78+
assert.Equal(t, 101, cache.Lookup("1"))
79+
assert.Equal(t, 201, cache.Lookup("2")) // [1, 2]
80+
assert.Nil(t, cache.Lookup("3"))
81+
}
82+
83+
func TestSizeZero(t *testing.T) {
84+
cache := NewLRUCache(0, 1000)
85+
cache.Save("1", 100)
86+
assert.Nil(t, cache.Lookup("1"))
87+
cache.Save("2", 200)
88+
assert.Nil(t, cache.Lookup("2"))
89+
cache.Reset()
90+
assert.Nil(t, cache.Lookup("1"))
91+
assert.Nil(t, cache.Lookup("2"))
92+
}
93+
94+
func TestThreadSafe(t *testing.T) {
95+
maxSize := 1000
96+
cache := NewLRUCache(maxSize, 1000)
97+
wg := sync.WaitGroup{}
98+
99+
save := func(k int, v interface{}, wg *sync.WaitGroup) {
100+
defer wg.Done()
101+
strKey := fmt.Sprintf("%d", k)
102+
cache.Save(strKey, v)
103+
}
104+
lookup := func(k int, wg *sync.WaitGroup, checkValue bool) {
105+
defer wg.Done()
106+
strKey := fmt.Sprintf("%d", k)
107+
v := cache.Lookup(strKey)
108+
if checkValue {
109+
assert.Equal(t, k*100, v)
110+
}
111+
}
112+
reset := func(wg *sync.WaitGroup) {
113+
defer wg.Done()
114+
cache.Reset()
115+
}
116+
117+
// Add entries
118+
wg.Add(maxSize)
119+
for i := 1; i <= maxSize; i++ {
120+
go save(i, i*100, &wg)
121+
}
122+
wg.Wait()
123+
124+
// Lookup previous entries
125+
wg.Add(maxSize)
126+
for i := 1; i <= maxSize; i++ {
127+
go lookup(i, &wg, true)
128+
}
129+
wg.Wait()
130+
131+
// Add more entries then the max size
132+
wg.Add(maxSize)
133+
for i := maxSize + 1; i <= maxSize*2; i++ {
134+
go save(i, i*100, &wg)
135+
}
136+
wg.Wait()
137+
138+
// Check if new entries replaced the old ones
139+
wg.Add(maxSize)
140+
for i := maxSize + 1; i <= maxSize*2; i++ {
141+
go lookup(i, &wg, true)
142+
}
143+
wg.Wait()
144+
145+
// Check if old items were deleted
146+
assert.Equal(t, maxSize, cache.queue.Len())
147+
assert.Equal(t, maxSize, len(cache.items))
148+
149+
wg.Add(maxSize * 3)
150+
// Check all api's simultaneously for race conditions
151+
for i := 1; i <= maxSize; i++ {
152+
go save(i, i*100, &wg)
153+
go lookup(i, &wg, false)
154+
go reset(&wg)
155+
}
156+
wg.Wait()
157+
}
158+
159+
func TestTimeout(t *testing.T) {
160+
var maxTimeout int64 = 1
161+
// cache with timeout
162+
cache1 := NewLRUCache(1000, maxTimeout)
163+
// Zero timeout cache
164+
cache2 := NewLRUCache(1000, 0)
165+
166+
cache1.Save("1", 100) // [1]
167+
cache1.Save("2", 200) // [1, 2]
168+
cache1.Save("3", 300) // [1,2,3]
169+
cache2.Save("1", 100) // [1]
170+
cache2.Save("2", 200) // [1, 2]
171+
cache2.Save("3", 300) // [1,2,3]
172+
173+
// cache1 should expire while cache2 should not
174+
time.Sleep(1 * time.Second)
175+
176+
// cache1 should expire
177+
assert.Nil(t, cache1.Lookup("1"))
178+
assert.Nil(t, cache1.Lookup("2"))
179+
assert.Nil(t, cache1.Lookup("3"))
180+
cache1.Save("1", 100) // [1]
181+
cache1.Save("4", 400) // [1,4]
182+
assert.Equal(t, 100, cache1.Lookup("1")) // [4,1]
183+
assert.Equal(t, 400, cache1.Lookup("4")) // [1,4]
184+
185+
// cache2 should not expire
186+
assert.Equal(t, 100, cache2.Lookup("1"))
187+
assert.Equal(t, 200, cache2.Lookup("2"))
188+
assert.Equal(t, 300, cache2.Lookup("3"))
189+
}

0 commit comments

Comments
 (0)