Commit 02b9d13

fix(core): change name of config flag for cache (#9274)

Author: Harshil Goel
1 parent ee99a7a, commit 02b9d13

5 files changed: +27 −33 lines changed

dgraph/cmd/alpha/run.go
Lines changed: 9 additions & 5 deletions

@@ -145,8 +145,12 @@ they form a Raft group and provide synchronous replication.
         Flag("percentage",
             "Cache percentages summing up to 100 for various caches (FORMAT: PostingListCache,"+
                 "PstoreBlockCache,PstoreIndexCache)").
-        Flag("keep-updates",
-            "Should carry updates in cache or not (bool)").
+        Flag("delete-on-updates",
+            "When set as true, we would delete the key from the cache once it's updated. If it's not "+
+                "we would update the value inside the cache. If the cache gets too full, it starts"+
+                " to get slow. So if your usecase has a lot of heavy mutations, this should be set"+
+                " as true. If you are modifying same data again and again, this should be set as"+
+                " false").
         String())
 
     flag.String("raft", worker.RaftDefaults, z.NewSuperFlagHelp(worker.RaftDefaults).
@@ -635,7 +639,7 @@ func run() {
     x.AssertTruef(totalCache >= 0, "ERROR: Cache size must be non-negative")
 
     cachePercentage := cache.GetString("percentage")
-    keepUpdates := cache.GetBool("keep-updates")
+    deleteOnUpdates := cache.GetBool("delete-on-updates")
     cachePercent, err := x.GetCachePercentages(cachePercentage, 3)
     x.Check(err)
     postingListCacheSize := (cachePercent[0] * (totalCache << 20)) / 100
@@ -658,7 +662,7 @@ func run() {
         WALDir:          Alpha.Conf.GetString("wal"),
         CacheMb:         totalCache,
         CachePercentage: cachePercentage,
-        KeepUpdates:     keepUpdates,
+        DeleteOnUpdates: deleteOnUpdates,
 
         MutationsMode: worker.AllowMutations,
         AuthToken:     security.GetString("token"),
@@ -786,7 +790,7 @@ func run() {
     // Posting will initialize index which requires schema. Hence, initialize
     // schema before calling posting.Init().
     schema.Init(worker.State.Pstore)
-    posting.Init(worker.State.Pstore, postingListCacheSize, keepUpdates)
+    posting.Init(worker.State.Pstore, postingListCacheSize, deleteOnUpdates)
     defer posting.Cleanup()
     worker.Init(worker.State.Pstore)

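For orientation (not part of the commit), the renamed option is set through the same cache superflag that already carries size-mb and percentage. An illustrative invocation, with placeholder values, might look like:

    dgraph alpha --cache "size-mb=2048; percentage=50,30,20; delete-on-updates=false"

Here delete-on-updates=false corresponds to the old keep-updates=true behaviour of updating cached entries in place, which suits workloads that rewrite the same keys repeatedly; the new default (see worker/server_state.go below) is delete-on-updates=true.
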
posting/lists.go
Lines changed: 2 additions & 2 deletions

@@ -41,12 +41,12 @@ var (
 )
 
 // Init initializes the posting lists package, the in memory and dirty list hash.
-func Init(ps *badger.DB, cacheSize int64, keepUpdates bool) {
+func Init(ps *badger.DB, cacheSize int64, deleteOnUpdates bool) {
     pstore = ps
     closer = z.NewCloser(1)
     go x.MonitorMemoryMetrics(closer)
 
-    memoryLayer = initMemoryLayer(cacheSize, keepUpdates)
+    memoryLayer = initMemoryLayer(cacheSize, deleteOnUpdates)
 }
 
 func UpdateMaxCost(maxCost int64) {

posting/mvcc.go
Lines changed: 13 additions & 23 deletions

@@ -63,10 +63,8 @@ type incrRollupi struct {
 }
 
 type CachePL struct {
-    count      int
     list       *List
     lastUpdate uint64
-    lastRead   time.Time
 }
 
 var (
@@ -401,8 +399,8 @@ func (c *Cache) clear() {
 }
 
 type MemoryLayer struct {
-    keepUpdates bool
-    cache       *Cache
+    deleteOnUpdates bool
+    cache           *Cache
 
     numDisksRead int
 }
@@ -414,9 +412,9 @@ func (ml *MemoryLayer) del(key []byte) {
     ml.cache.del(key)
 }
 
-func initMemoryLayer(cacheSize int64, keepUpdates bool) *MemoryLayer {
+func initMemoryLayer(cacheSize int64, deleteOnUpdates bool) *MemoryLayer {
     ml := &MemoryLayer{}
-    ml.keepUpdates = keepUpdates
+    ml.deleteOnUpdates = deleteOnUpdates
     if cacheSize > 0 {
         cache, err := ristretto.NewCache[[]byte, *CachePL](&ristretto.Config[[]byte, *CachePL]{
             // Use 5% of cache memory for storing counters.
@@ -449,9 +447,7 @@ func initMemoryLayer(cacheSize int64, keepUpdates bool) *MemoryLayer {
 
 func NewCachePL() *CachePL {
     return &CachePL{
-        count:      0,
-        list:       nil,
-        lastUpdate: 0,
+        list: nil,
     }
 }
 
@@ -472,7 +468,7 @@ func (ml *MemoryLayer) updateItemInCache(key string, delta []byte, startTs, comm
         return
     }
 
-    if !ml.keepUpdates {
+    if ml.deleteOnUpdates {
         // TODO We should mark the key as deleted instead of directly deleting from the cache.
         ml.del([]byte(key))
         return
@@ -484,9 +480,8 @@ func (ml *MemoryLayer) updateItemInCache(key string, delta []byte, startTs, comm
     }
 
     val.lastUpdate = commitTs
-    val.count -= 1
 
-    if val.list != nil && ml.keepUpdates {
+    if val.list != nil {
         p := new(pb.PostingList)
         x.Check(proto.Unmarshal(delta, p))
 
@@ -640,16 +635,12 @@ func (c *CachePL) Set(l *List, readTs uint64) {
 func (ml *MemoryLayer) readFromCache(key []byte, readTs uint64) *List {
     cacheItem, ok := ml.cache.get(key)
 
-    if ok {
-        cacheItem.count += 1
-        cacheItem.lastRead = time.Now()
-        if cacheItem.list != nil && cacheItem.list.minTs <= readTs {
-            cacheItem.list.RLock()
-            lCopy := copyList(cacheItem.list)
-            cacheItem.list.RUnlock()
-            checkForRollup(key, lCopy)
-            return lCopy
-        }
+    if ok && cacheItem.list != nil && cacheItem.list.minTs <= readTs {
+        cacheItem.list.RLock()
+        lCopy := copyList(cacheItem.list)
+        cacheItem.list.RUnlock()
+        checkForRollup(key, lCopy)
+        return lCopy
     }
     return nil
 }
@@ -679,7 +670,6 @@ func (ml *MemoryLayer) saveInCache(key []byte, l *List) {
     l.RLock()
     defer l.RUnlock()
     cacheItem := NewCachePL()
-    cacheItem.count = 1
     cacheItem.list = copyList(l)
     cacheItem.lastUpdate = l.maxTs
     ml.cache.set(key, cacheItem)

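Outside the diff itself, a minimal toy sketch of the behaviour the renamed knob selects in updateItemInCache: with delete-on-updates the entry is evicted so the next read repopulates it from disk, otherwise the cached value is patched in place. The types below are hypothetical stand-ins, not Dgraph's real CachePL/MemoryLayer internals.

    package main

    import "fmt"

    // entry is a stand-in for a cached posting list: a value plus the
    // commit timestamp of the last update applied to it.
    type entry struct {
        value      string
        lastUpdate uint64
    }

    // layer is a toy analogue of MemoryLayer with the renamed knob.
    type layer struct {
        deleteOnUpdates bool
        cache           map[string]*entry
    }

    // applyUpdate mirrors the branch this commit flips: evict the key on
    // update, or merge the delta into the cached value in place.
    func (l *layer) applyUpdate(key, delta string, commitTs uint64) {
        e, ok := l.cache[key]
        if !ok {
            return // nothing cached, nothing to do
        }
        if l.deleteOnUpdates {
            delete(l.cache, key)
            return
        }
        e.value += delta // stand-in for merging the posting-list delta
        e.lastUpdate = commitTs
    }

    func main() {
        l := &layer{
            deleteOnUpdates: true,
            cache:           map[string]*entry{"k": {value: "v1"}},
        }
        l.applyUpdate("k", "+v2", 10)
        _, stillCached := l.cache["k"]
        fmt.Println("cached after update:", stillCached) // false: evicted on update
    }
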
worker/config.go
Lines changed: 2 additions & 2 deletions

@@ -63,11 +63,11 @@ type Options struct {
     CachePercentage string
     // CacheMb is the total memory allocated between all the caches.
     CacheMb int64
-    // KeepUpdates is the parameter that allows the user to set if the cache should keep the items that were
+    // DeleteOnUpdates is the parameter that allows the user to set if the cache should keep the items that were
     // just mutated. Keeping these items are good when there is a mixed workload where you are updating the
     // same element multiple times. However, for a heavy mutation workload, not keeping these items would be better
     // , as keeping these elements bloats the cache making it slow.
-    KeepUpdates bool
+    DeleteOnUpdates bool
 
     Audit *x.LoggerConf

worker/server_state.go
Lines changed: 1 addition & 1 deletion

@@ -52,7 +52,7 @@ const (
     ZeroLimitsDefaults = `uid-lease=0; refill-interval=30s; disable-admin-http=false;`
     GraphQLDefaults    = `introspection=true; debug=false; extensions=true; poll-interval=1s; ` +
         `lambda-url=;`
-    CacheDefaults        = `size-mb=1024; percentage=40,40,20; keep-updates=false`
+    CacheDefaults        = `size-mb=1024; percentage=40,40,20; delete-on-updates=true`
     FeatureFlagsDefaults = `normalize-compatibility-mode=`
 )

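As a reading aid (not part of the commit), the old and new defaults encode the same behaviour with inverted polarity:

    keep-updates=false       old flag: do not keep updated entries in the cache
    delete-on-updates=true   new flag: evict entries from the cache when they are updated

So the default eviction-on-update behaviour is unchanged; only the flag name and its polarity differ.
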