|
1 | | -// read, copy, update |
2 | 1 | package rcu |
3 | 2 |
|
4 | | -import ( |
5 | | - "slices" |
6 | | - "sync" |
7 | | -) |
| 3 | +import "sync/atomic" |
8 | 4 |
|
9 | | -// Represents a single unit of data that "RCU" Holds. |
10 | | -type Element[T any] struct { |
11 | | - data T |
12 | | - mu *sync.Mutex // guards the "refCount" down below. |
13 | | - refCount int // Read & Writes on "refCount" only happens under the mu lock. |
14 | | -} |
15 | | - |
16 | | -// RCU is a structure that provides a safe way to Write and read |
17 | | -// data. All readers are guaranteed to access to the second latest |
18 | | -// buffer, Using its "Latest()" method. |
// RCU publishes a snapshot value to concurrent readers without locks.
// A writer replaces the current snapshot with Store; readers fetch it
// with Load. Publication goes through an atomic.Pointer, so readers
// never block and always observe a fully-written *T.
//
// The zero value is ready to use: Load returns nil until the first
// Store. NOTE(review): RCU semantics imply a value must not be
// mutated after it is published — callers should build a fresh *T
// and Store that instead; confirm call sites follow this.
type RCU[T any] struct {
	// p holds the current snapshot; nil until the first Store.
	p atomic.Pointer[T]
}
76 | 8 |
|
77 | | -type RefDecrementFunc func() |
78 | | - |
79 | | -// returns the most recent valid element. The caller is reponsible for |
80 | | -// decrementing the refCount using the returned "RefDecrementFunc". |
81 | | -func (rcu *RCU[T]) Latest() (*T, RefDecrementFunc) { |
82 | | - rcu.mu.RLock() |
83 | | - |
84 | | - if len(rcu.elements) >= 2 { |
85 | | - index := len(rcu.elements) - 2 |
86 | | - |
87 | | - elem := &rcu.elements[index] |
88 | | - rcu.mu.RUnlock() |
89 | | - |
90 | | - elem.mu.Lock() |
91 | | - elem.refCount++ |
92 | | - elem.mu.Unlock() |
93 | | - |
94 | | - return &elem.data, func() { |
95 | | - elem.mu.Lock() |
96 | | - elem.refCount-- |
97 | | - elem.mu.Unlock() |
98 | | - } |
99 | | - } |
100 | | - |
101 | | - rcu.mu.RUnlock() |
102 | | - return nil, nil |
| 9 | +func (r *RCU[T]) Store(t *T) { |
| 10 | + r.p.Store(t) |
103 | 11 | } |
104 | 12 |
|
105 | | -// Assigns data to the last index of "elements" slice. It doesn't |
106 | | -// need mutual exclution, because only one goroutine manipulates the |
107 | | -// rcu slice. |
108 | | -func (rcu *RCU[T]) Assign(data T) { |
109 | | - l := len(rcu.elements) |
110 | | - rcu.elements[l-1].data = data |
| 13 | +func (r *RCU[T]) Load() *T { |
| 14 | + return r.p.Load() |
111 | 15 | } |
0 commit comments