-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcached.go
More file actions
105 lines (97 loc) · 3.2 KB
/
cached.go
File metadata and controls
105 lines (97 loc) · 3.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
// Package cachedrander provides a reader designed to cache random data for the
// creation of random UUIDs. Using rand.Reader as the source of random data
// (the default for github.com/google/uuid) requires a mutex operation per newly
// minted version 4 (random) UUID. This package typically only requires a
// single atomic.AddUint64 per newly minted UUID.
//
// This package works by having two pages of cached random data. The first page
// is read when the CachedReader is created. Once that page has been exhausted
// Read calls will block on a mutex while the second page is being loaded.
//
// This package has a theoretical race condition:
//
// Caller A reads the index of its data in the current page and is preempted.
// Prior to resuming, a sufficient number of calls to Read are made to exhaust
// the current page and the next loaded page. It is now possible for caller A
// to return the same data as another caller.
//
// To mitigate this condition the CachedReader should use a sufficiently large
// cache that the probability of this happening is essentially 0.
package cachedrander
import (
"crypto/rand"
"io"
"sync"
"sync/atomic"
)
// A CachedReader caches chunks of data from a reader and then provides that
// data to calls to its Read method.
//
// The Max value determines the maximum size read that will be honored. This
// defaults to 16 (the size of a UUID). Max should only be set prior to the
// first Read of the CachedReader. Max should be multiple times smaller than
// the size of the cache.
type CachedReader struct {
	// size and index are placed first so they are guaranteed to be 64-bit
	// aligned: index is manipulated with sync/atomic 64-bit operations, which
	// panic on unaligned addresses on 32-bit platforms (see the "Bugs"
	// section of the sync/atomic documentation).
	size  uint64 // length in bytes of each cache page; set once in New
	index uint64 // bit 63 selects the active page, low 63 bits the offset

	// Max is the maximum number of bytes a single Read call will honor.
	Max int

	mu    sync.Mutex // serializes page refills (see fill)
	pages [2][]byte  // double-buffered cache pages, each size bytes long
	r     io.Reader  // underlying source the cache is filled from
}
// NewUUIDReader returns a CachedReader that caches n UUID's worth of data
// from rand.Reader at a time. Choose n large enough (e.g., 100 or 1000) that
// the theoretical race condition described in the package documentation is
// vanishingly unlikely.
func NewUUIDReader(n int) (*CachedReader, error) {
	const uuidLen = 16 // a version 4 UUID is 16 bytes of random data
	return New(rand.Reader, n*uuidLen)
}
// New returns a new CachedReader that caches size bytes from r at a time. An
// error is returned if filling the initial cache from r returns an error.
func New(r io.Reader, size int) (*CachedReader, error) {
	cr := &CachedReader{
		Max:   16,
		size:  uint64(size),
		pages: [2][]byte{make([]byte, size), make([]byte, size)},
		r:     r,
	}
	// Prime the first page up front so early Reads never block on a refill.
	_, err := io.ReadFull(r, cr.pages[0])
	if err != nil {
		return nil, err
	}
	return cr, nil
}
const (
	// indexBits is the number of low bits of CachedReader.index that hold
	// the byte offset into the active page; the single remaining high bit
	// (bit 63) selects which of the two pages is active.
	indexBits = 63
	// indexMask extracts the offset portion of the combined page/offset word.
	indexMask = (1 << indexBits) - 1
)
// Read fills buf with cached data. At most r.Max bytes are honored per call.
// An error is returned only when refilling the cache from the underlying
// reader fails.
func (r *CachedReader) Read(buf []byte) (int, error) {
	if len(buf) > r.Max {
		buf = buf[:r.Max]
	}
	// Never reserve more than one page's worth of data; a larger request
	// could otherwise never fit in a page and would loop forever.
	if uint64(len(buf)) > r.size {
		buf = buf[:r.size]
	}
	blen := uint64(len(buf))
	for {
		// Reserve blen bytes: the returned value encodes the active page in
		// bit 63 and the end offset of our reservation in the low bits.
		ai := atomic.AddUint64(&r.index, blen)
		page := int(ai >> indexBits)
		i := ai & indexMask
		// The reservation is valid only if it ends within the page. Note the
		// comparison must be against i, not i-blen: i-blen <= r.size would
		// accept i == r.size+blen, yielding a zero-byte (0, nil) read —
		// discouraged by the io.Reader contract — and short reads of the
		// page's tail.
		if i <= r.size {
			return copy(buf, r.pages[page][i-blen:i]), nil
		}
		// Page exhausted: load the other page and retry.
		if err := r.fill(); err != nil {
			return 0, err
		}
	}
}
// fill loads fresh data into the cache page we are currently not reading
// from and then switches reading over to that page. It is a no-op when
// another caller already refilled while we were waiting on the mutex.
func (r *CachedReader) fill() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	ai := atomic.LoadUint64(&r.index)
	// Re-check under the lock: a concurrent fill may already have reset the
	// index to the start of a freshly loaded page.
	if (ai & indexMask) > r.size {
		page := (ai >> indexBits) ^ 1
		if _, err := io.ReadFull(r.r, r.pages[page]); err != nil {
			// Do not switch pages on failure: the inactive page may hold
			// partial or stale data. (Previously the index was reset even on
			// error, so later Reads silently served that bad data.)
			return err
		}
		atomic.StoreUint64(&r.index, page<<indexBits)
	}
	return nil
}