@@ -63,10 +63,8 @@ type incrRollupi struct {
 }
 
 type CachePL struct {
-	count      int
 	list       *List
 	lastUpdate uint64
-	lastRead   time.Time
 }
 
 var (
@@ -401,8 +399,8 @@ func (c *Cache) clear() {
 }
 
 type MemoryLayer struct {
-	keepUpdates bool
-	cache       *Cache
+	deleteOnUpdates bool
+	cache           *Cache
 
 	numDisksRead int
 }
@@ -414,9 +412,9 @@ func (ml *MemoryLayer) del(key []byte) {
 	ml.cache.del(key)
 }
 
-func initMemoryLayer(cacheSize int64, keepUpdates bool) *MemoryLayer {
+func initMemoryLayer(cacheSize int64, deleteOnUpdates bool) *MemoryLayer {
 	ml := &MemoryLayer{}
-	ml.keepUpdates = keepUpdates
+	ml.deleteOnUpdates = deleteOnUpdates
 	if cacheSize > 0 {
 		cache, err := ristretto.NewCache[[]byte, *CachePL](&ristretto.Config[[]byte, *CachePL]{
 			// Use 5% of cache memory for storing counters.
@@ -449,9 +447,7 @@ func initMemoryLayer(cacheSize int64, keepUpdates bool) *MemoryLayer {
 
 func NewCachePL() *CachePL {
 	return &CachePL{
-		count:      0,
-		list:       nil,
-		lastUpdate: 0,
+		list: nil,
 	}
 }
 
@@ -472,7 +468,7 @@ func (ml *MemoryLayer) updateItemInCache(key string, delta []byte, startTs, comm
 		return
 	}
 
-	if !ml.keepUpdates {
+	if ml.deleteOnUpdates {
 		// TODO We should mark the key as deleted instead of directly deleting from the cache.
 		ml.del([]byte(key))
 		return
@@ -484,9 +480,8 @@ func (ml *MemoryLayer) updateItemInCache(key string, delta []byte, startTs, comm
 	}
 
 	val.lastUpdate = commitTs
-	val.count -= 1
 
-	if val.list != nil && ml.keepUpdates {
+	if val.list != nil {
 		p := new(pb.PostingList)
 		x.Check(proto.Unmarshal(delta, p))
 
@@ -640,16 +635,12 @@ func (c *CachePL) Set(l *List, readTs uint64) {
 func (ml *MemoryLayer) readFromCache(key []byte, readTs uint64) *List {
 	cacheItem, ok := ml.cache.get(key)
 
-	if ok {
-		cacheItem.count += 1
-		cacheItem.lastRead = time.Now()
-		if cacheItem.list != nil && cacheItem.list.minTs <= readTs {
-			cacheItem.list.RLock()
-			lCopy := copyList(cacheItem.list)
-			cacheItem.list.RUnlock()
-			checkForRollup(key, lCopy)
-			return lCopy
-		}
+	if ok && cacheItem.list != nil && cacheItem.list.minTs <= readTs {
+		cacheItem.list.RLock()
+		lCopy := copyList(cacheItem.list)
+		cacheItem.list.RUnlock()
+		checkForRollup(key, lCopy)
+		return lCopy
 	}
 	return nil
 }
@@ -679,7 +670,6 @@ func (ml *MemoryLayer) saveInCache(key []byte, l *List) {
 	l.RLock()
 	defer l.RUnlock()
 	cacheItem := NewCachePL()
-	cacheItem.count = 1
 	cacheItem.list = copyList(l)
 	cacheItem.lastUpdate = l.maxTs
 	ml.cache.set(key, cacheItem)
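
The rename from keepUpdates to deleteOnUpdates inverts the flag's polarity: the guard in updateItemInCache now fires when the flag is true and drops the cached entry instead of patching it with the delta. Below is a minimal, self-contained sketch of that inversion; the miniature memoryLayer type and its string-returning method are illustrative stand-ins for this example, not the package's real API.

package main

import "fmt"

// memoryLayer mirrors, in miniature, the renamed field from the diff:
// keepUpdates is gone and deleteOnUpdates carries the opposite polarity.
type memoryLayer struct {
	deleteOnUpdates bool
}

// updateItemInCache sketches the branch from the diff: when deleteOnUpdates is
// set, the cached entry is dropped; otherwise the delta would be applied to it.
func (ml *memoryLayer) updateItemInCache(key string) string {
	if ml.deleteOnUpdates {
		return "delete cached list for " + key
	}
	return "apply delta to cached list for " + key
}

func main() {
	// Old keepUpdates=true corresponds to deleteOnUpdates=false, and vice versa.
	keep := &memoryLayer{deleteOnUpdates: false}
	drop := &memoryLayer{deleteOnUpdates: true}
	fmt.Println(keep.updateItemInCache("0x1")) // apply delta to cached list for 0x1
	fmt.Println(drop.updateItemInCache("0x1")) // delete cached list for 0x1
}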