18 changes: 18 additions & 0 deletions .golangci.yml
@@ -0,0 +1,18 @@
run:
timeout: 3m

linters:
disable-all: true
enable:
- gosimple
- govet
- ineffassign
- staticcheck
- typecheck
- unused
- gofmt
- revive
- gci
- gofumpt
- whitespace
- gosec
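
The linter set above accounts for most of the mechanical edits in this PR: revive's default indent-error-flow rule is the likely reason the else-after-return branches disappear from cache.go and ringbuf.go, gofumpt and whitespace tighten formatting, and gosec covers the randomness change in the tests. A minimal standalone sketch of the else-after-return cleanup, with illustrative function names that are not from this repo:

package main

import "fmt"

// ratioWithElse shows the shape the linter flags: the else branch is
// redundant because the if branch already returns.
func ratioWithElse(hits, lookups int64) float64 {
	if lookups == 0 {
		return 0
	} else { // indent-error-flow: drop this else
		return float64(hits) / float64(lookups)
	}
}

// ratioFlattened is the equivalent form the diff rewrites the code into.
func ratioFlattened(hits, lookups int64) float64 {
	if lookups == 0 {
		return 0
	}
	return float64(hits) / float64(lookups)
}

func main() {
	fmt.Println(ratioWithElse(3, 4), ratioFlattened(3, 4))
}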
6 changes: 2 additions & 4 deletions cache.go
@@ -296,9 +296,8 @@ func (cache *Cache) AverageAccessTime() int64 {
}
if entryCount == 0 {
return 0
} else {
return totalTime / entryCount
}
return totalTime / entryCount
}

// HitCount is a metric that returns number of times a key was found in the cache.
@@ -328,9 +327,8 @@ func (cache *Cache) HitRate() float64 {
lookupCount := hitCount + missCount
if lookupCount == 0 {
return 0
} else {
return float64(hitCount) / float64(lookupCount)
}
return float64(hitCount) / float64(lookupCount)
}

// OverwriteCount indicates the number of times entries have been overridden.
35 changes: 21 additions & 14 deletions cache_test.go
@@ -7,7 +7,7 @@ import (
"errors"
"fmt"
"log"
mrand "math/rand"
"math/big"
"strconv"
"strings"
"sync"
@@ -65,7 +65,7 @@ func TestFreeCache(t *testing.T) {
if !affected {
t.Error("del should return affected true")
}
value, err = cache.Get(key)
_, err = cache.Get(key)
if err != ErrNotFound {
t.Error("error should be ErrNotFound after being deleted")
}
@@ -112,7 +112,7 @@ func TestFreeCache(t *testing.T) {
t.Errorf("value is %v, expected %v", string(value), expectedValStr)
}
}
err = cache.GetFn([]byte(keyStr), func(val []byte) error {
_ = cache.GetFn([]byte(keyStr), func(val []byte) error {
if string(val) != expectedValStr {
t.Errorf("getfn: value is %v, expected %v", string(val), expectedValStr)
}
@@ -213,7 +213,7 @@ func TestGetWithExpiration(t *testing.T) {

res, expiry, err := cache.GetWithExpiration(key)
var expireTime time.Time
var startTime = time.Now()
startTime := time.Now()
for {
_, _, err := cache.GetWithExpiration(key)
expireTime = time.Now()
@@ -283,7 +283,6 @@ func testTTLWithNoExpireKey(t *testing.T) {

// act
ttl, err := cache.TTL(key)

// assert
if err != nil {
t.Errorf("expected nil, but got %v", err)
@@ -315,7 +314,6 @@ func testTTLWithNotYetExpiredKey(t *testing.T) {

// act
ttl, err := cache.TTL(key)

// assert
if err != nil {
t.Errorf("expected nil, but got %v", err)
@@ -498,12 +496,12 @@ func TestLargeEntry(t *testing.T) {
if err != ErrLargeKey {
t.Error("large key should return ErrLargeKey")
}
val, err = cache.Get(key)
if val != nil {
t.Error("value should be nil when get a big key")
_, err = cache.Get(key)
if err != ErrNotFound {
t.Error("error should be NotFound")
}
key = []byte("abcd")
maxValLen := cacheSize/1024 - ENTRY_HDR_SIZE - len(key)
maxValLen := cacheSize/1024 - entryHdrSize - len(key)
val = make([]byte, maxValLen+1)
err = cache.Set(key, val, 0)
if err != ErrLargeEntry {
@@ -650,15 +648,15 @@ func TestSetLargerEntryDeletesWrongEntry(t *testing.T) {

func TestRace(t *testing.T) {
cache := NewCache(minBufSize)
inUse := 8
var inUse int64 = 8
wg := sync.WaitGroup{}
var iters int64 = 1000

wg.Add(6)
addFunc := func() {
var i int64
for i = 0; i < iters; i++ {
err := cache.SetInt(int64(mrand.Intn(inUse)), []byte("abc"), 1)
err := cache.SetInt(randInt64(inUse), []byte("abc"), 1)
if err != nil {
t.Errorf("err: %s", err)
}
@@ -668,14 +666,14 @@ func TestRace(t *testing.T) {
getFunc := func() {
var i int64
for i = 0; i < iters; i++ {
_, _ = cache.GetInt(int64(mrand.Intn(inUse))) // it will likely error w/ delFunc running too
_, _ = cache.GetInt(randInt64(inUse)) // it will likely error w/ delFunc running too
}
wg.Done()
}
delFunc := func() {
var i int64
for i = 0; i < iters; i++ {
cache.DelInt(int64(mrand.Intn(inUse)))
cache.DelInt(randInt64(inUse))
}
wg.Done()
}
@@ -780,6 +778,7 @@ func BenchmarkCacheSet(b *testing.B) {
cache.Set(key[:], make([]byte, 8), 0)
}
}

func BenchmarkParallelCacheSet(b *testing.B) {
cache := NewCache(256 * 1024 * 1024)
var key [8]byte
@@ -1100,3 +1099,11 @@ func TestBenchmarkCacheSet(t *testing.T) {
t.Errorf("current alloc count '%d' is higher than 0", alloc)
}
}

func randInt64(max int64) int64 {
nBig, err := rand.Int(rand.Reader, big.NewInt(max))
if err != nil {
panic(err)
}
return nBig.Int64()
}
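
The randInt64 helper above replaces the math/rand calls in the tests; gosec's G404 check reports math/rand as a weak random source, which is presumably the motivation for drawing from crypto/rand via math/big instead. A self-contained sketch of the same pattern (the usage in main is illustrative):

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// randInt64 returns a uniformly distributed value in [0, max), mirroring the
// helper added to cache_test.go. rand.Int fails only if the entropy source
// does, so panicking is acceptable in test code.
func randInt64(max int64) int64 {
	nBig, err := rand.Int(rand.Reader, big.NewInt(max))
	if err != nil {
		panic(err)
	}
	return nBig.Int64()
}

func main() {
	// Pick a key id in [0, 8), as the TestRace goroutines do.
	fmt.Println(randInt64(8))
}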
8 changes: 4 additions & 4 deletions iterator.go
@@ -49,22 +49,22 @@ func (it *Iterator) nextForSegment(segIdx int) *Entry {
return nil
}

func (it *Iterator) nextForSlot(seg *segment, slotId int) *Entry {
func (it *Iterator) nextForSlot(seg *segment, slotID int) *Entry {
slotOff := int32(it.slotIdx) * seg.slotCap
slot := seg.slotsData[slotOff : slotOff+seg.slotLens[it.slotIdx] : slotOff+seg.slotCap]
for it.entryIdx < len(slot) {
ptr := slot[it.entryIdx]
it.entryIdx++
now := seg.timer.Now()
var hdrBuf [ENTRY_HDR_SIZE]byte
var hdrBuf [entryHdrSize]byte
seg.rb.ReadAt(hdrBuf[:], ptr.offset)
hdr := (*entryHdr)(unsafe.Pointer(&hdrBuf[0]))
if hdr.expireAt == 0 || hdr.expireAt > now {
entry := new(Entry)
entry.Key = make([]byte, hdr.keyLen)
entry.Value = make([]byte, hdr.valLen)
seg.rb.ReadAt(entry.Key, ptr.offset+ENTRY_HDR_SIZE)
seg.rb.ReadAt(entry.Value, ptr.offset+ENTRY_HDR_SIZE+int64(hdr.keyLen))
seg.rb.ReadAt(entry.Key, ptr.offset+entryHdrSize)
seg.rb.ReadAt(entry.Value, ptr.offset+entryHdrSize+int64(hdr.keyLen))
return entry
}
}
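
The iterator.go hunk also carries two renames driven by Go naming conventions, likely reported by revive's var-naming rule: SCREAMING_SNAKE_CASE constants become MixedCaps (ENTRY_HDR_SIZE to entryHdrSize) and initialisms stay fully capitalized (slotId to slotID). An illustrative sketch, not taken from this repo:

package main

import "fmt"

// entryHdrSize uses MixedCaps rather than ENTRY_HDR_SIZE; the value 24 is
// illustrative, not necessarily the real header size.
const entryHdrSize = 24

// slotLabel takes a slotID parameter: initialisms such as ID keep their
// capitalization in Go identifiers.
func slotLabel(slotID int) string {
	return fmt.Sprintf("slot-%d", slotID)
}

func main() {
	fmt.Println(entryHdrSize, slotLabel(3))
}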
26 changes: 13 additions & 13 deletions ringbuf.go
@@ -16,7 +16,7 @@ type RingBuf struct {
begin int64 // beginning offset of the data stream.
end int64 // ending offset of the data stream.
data []byte
index int //range from '0' to 'len(rb.data)-1'
index int // range from '0' to 'len(rb.data)-1'
}

func NewRingBuf(size int, begin int64) (rb RingBuf) {
@@ -28,7 +28,8 @@ func NewRingBuf(size int, begin int64) (rb RingBuf) {
// Reset the ring buffer
//
// Parameters:
// begin: beginning offset of the data stream
//
// begin: beginning offset of the data stream
func (rb *RingBuf) Reset(begin int64) {
rb.begin = begin
rb.end = begin
@@ -161,15 +162,14 @@ func (rb *RingBuf) EqualAt(p []byte, off int64) bool {
readEnd := readOff + len(p)
if readEnd <= len(rb.data) {
return bytes.Equal(p, rb.data[readOff:readEnd])
} else {
firstLen := len(rb.data) - readOff
equal := bytes.Equal(p[:firstLen], rb.data[readOff:])
if equal {
secondLen := len(p) - firstLen
equal = bytes.Equal(p[firstLen:], rb.data[:secondLen])
}
return equal
}
firstLen := len(rb.data) - readOff
equal := bytes.Equal(p[:firstLen], rb.data[readOff:])
if equal {
secondLen := len(p) - firstLen
equal = bytes.Equal(p[firstLen:], rb.data[:secondLen])
}
return equal
}

// Evacuate reads the data at off, then writes it to the data stream,
@@ -186,21 +186,21 @@ func (rb *RingBuf) Evacuate(off int64, length int) (newOff int64) {
rb.index -= len(rb.data)
}
} else if readOff < rb.index {
var n = copy(rb.data[rb.index:], rb.data[readOff:readOff+length])
n := copy(rb.data[rb.index:], rb.data[readOff:readOff+length])
rb.index += n
if rb.index == len(rb.data) {
rb.index = copy(rb.data, rb.data[readOff+n:readOff+length])
}
} else {
var readEnd = readOff + length
readEnd := readOff + length
var n int
if readEnd <= len(rb.data) {
n = copy(rb.data[rb.index:], rb.data[readOff:readEnd])
rb.index += n
} else {
n = copy(rb.data[rb.index:], rb.data[readOff:])
rb.index += n
var tail = length - n
tail := length - n
n = copy(rb.data[rb.index:], rb.data[:tail])
rb.index += n
if rb.index == len(rb.data) {