12 changes: 12 additions & 0 deletions fastcache.go
@@ -211,6 +211,18 @@ func (c *Cache) UpdateStats(s *Stats) {
 	s.InvalidValueHashErrors += atomic.LoadUint64(&c.bigStats.InvalidValueHashErrors)
 }
 
+func (c *Cache) ReloadFromFile(path string) error {
+	c.Reset()
+	var err error
+	c, err = load(c, path, 0)
+	return err
+}
+
+func (c *Cache) Close() error {
+	c.Reset()
+	return clearChunks()
+}
+
 type bucket struct {
 	mu sync.RWMutex
 
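For orientation, a minimal usage sketch of the two methods added above. This example is not part of the diff; the path and cache size are illustrative, while New, Set and SaveToFile are existing fastcache APIs.

package main

import (
	"log"

	"github.com/VictoriaMetrics/fastcache"
)

func main() {
	c := fastcache.New(32 * 1024 * 1024)
	c.Set([]byte("key"), []byte("value"))

	// Persist the cache, then refill the same *Cache in place instead of
	// allocating a fresh one via LoadFromFile.
	if err := c.SaveToFile("/tmp/fastcache-data"); err != nil {
		log.Fatal(err)
	}
	if err := c.ReloadFromFile("/tmp/fastcache-data"); err != nil {
		log.Fatal(err)
	}

	// Close resets the cache and, on mmap-backed platforms, hands the pooled
	// chunk memory back to the OS via clearChunks.
	if err := c.Close(); err != nil {
		log.Fatal(err)
	}
}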
15 changes: 10 additions & 5 deletions file.go
@@ -80,15 +80,15 @@ func (c *Cache) SaveToFileConcurrent(filePath string, concurrency int) error {
 //
 // See SaveToFile* for saving cache data to file.
 func LoadFromFile(filePath string) (*Cache, error) {
-	return load(filePath, 0)
+	return load(nil, filePath, 0)
 }
 
 // LoadFromFileOrNew tries loading cache data from the given filePath.
 //
 // The function falls back to creating new cache with the given maxBytes
 // capacity if error occurs during loading the cache from file.
 func LoadFromFileOrNew(filePath string, maxBytes int) *Cache {
-	c, err := load(filePath, maxBytes)
+	c, err := load(nil, filePath, maxBytes)
 	if err == nil {
 		return c
 	}
@@ -125,7 +125,7 @@ func (c *Cache) save(dir string, workersCount int) error {
 	return err
 }
 
-func load(filePath string, maxBytes int) (*Cache, error) {
+func load(oldCache *Cache, filePath string, maxBytes int) (*Cache, error) {
 	maxBucketChunks, err := loadMetadata(filePath)
 	if err != nil {
 		return nil, err
@@ -152,7 +152,12 @@ func load(filePath string, maxBytes int) (*Cache, error) {
 	}
 	results := make(chan error)
 	workersCount := 0
-	var c Cache
+	var c *Cache
+	if oldCache != nil {
+		c = oldCache
+	} else {
+		c = &Cache{}
+	}
 	for _, fi := range fis {
 		fn := fi.Name()
 		if fi.IsDir() || !dataFileRegexp.MatchString(fn) {
@@ -183,7 +188,7 @@ func load(filePath string, maxBytes int) (*Cache, error) {
 			b.m = make(map[uint64]uint64)
 		}
 	}
-	return &c, nil
+	return c, nil
 }
 
 func saveMetadata(c *Cache, dir string) error {
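The heart of this change is a reuse-or-allocate pattern inside load: the package-level loaders pass nil and receive a freshly allocated Cache, while the new ReloadFromFile method passes its receiver so the same Cache value is refilled in place. A stripped-down illustration of that pattern follows (hypothetical names, not fastcache code):

package main

import "fmt"

type cache struct{ entries map[string]string }

// loadInto fills dst when it is non-nil and allocates a new cache otherwise,
// mirroring the oldCache handling added to load in this diff.
func loadInto(dst *cache) *cache {
	if dst == nil {
		dst = &cache{}
	}
	dst.entries = map[string]string{"k": "v"} // stand-in for reading the data files
	return dst
}

func main() {
	fresh := loadInto(nil)       // LoadFromFile path: allocate internally
	reused := loadInto(fresh)    // ReloadFromFile path: refill the caller's cache
	fmt.Println(fresh == reused) // true: pointer identity is preserved for existing holders
}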
5 changes: 5 additions & 0 deletions malloc_heap.go
@@ -9,3 +9,8 @@ func getChunk() []byte {
 func putChunk(chunk []byte) {
 	// No-op.
 }
+
+func clearChunks() error {
+	// No-op.
+	return nil
+}
19 changes: 18 additions & 1 deletion malloc_mmap.go
@@ -13,7 +13,10 @@ import (
 const chunksPerAlloc = 1024
 
 var (
-	freeChunks []*[chunkSize]byte
+	// chunks to hand out to the library
+	freeChunks []*[chunkSize]byte
+	// original slices from Mmap, kept for book-keeping
+	baseChunks [][]byte
 	freeChunksLock sync.Mutex
 )
 
@@ -26,6 +29,7 @@ func getChunk() []byte {
 		if err != nil {
 			panic(fmt.Errorf("cannot allocate %d bytes via mmap: %s", chunkSize*chunksPerAlloc, err))
 		}
+		baseChunks = append(baseChunks, data)
 		for len(data) > 0 {
 			p := (*[chunkSize]byte)(unsafe.Pointer(&data[0]))
 			freeChunks = append(freeChunks, p)
@@ -51,3 +55,16 @@ func putChunk(chunk []byte) {
 	freeChunks = append(freeChunks, p)
 	freeChunksLock.Unlock()
 }
+
+func clearChunks() error {
+	freeChunksLock.Lock()
+	defer freeChunksLock.Unlock()
+	freeChunks = nil
+	for _, data := range baseChunks {
+		baseChunk := data
+		if err := unix.Munmap(baseChunk); err != nil {
+			return err
+		}
+	}
+	return nil
+}