package lockfree

import (
	"sync/atomic"
	"unsafe"
)

// Queue is a lock-free FIFO queue based on the Michael & Scott algorithm:
// a singly linked list with a dummy head node, manipulated only through
// atomic loads and compare-and-swap operations.
// ref: https://dl.acm.org/citation.cfm?doid=248052.248106
type Queue struct {
	head unsafe.Pointer
	tail unsafe.Pointer
	len  uint64
}

// NewQueue creates a new lock-free queue.
func NewQueue() *Queue {
	head := queueitem{next: nil, v: nil} // allocate the dummy item
	return &Queue{
		tail: unsafe.Pointer(&head), // both head and tail point
		head: unsafe.Pointer(&head), // to the dummy item
	}
}

// Enqueue puts the given value v at the tail of the queue.
func (q *Queue) Enqueue(v interface{}) {
	item := &queueitem{next: nil, v: v} // allocate the new item
	var last, lastnext *queueitem
	for {
		last = loadqitem(&q.tail)
		lastnext = loadqitem(&last.next)
		if loadqitem(&q.tail) == last { // are tail and next consistent?
			if lastnext == nil { // was tail pointing to the last node?
				if casqitem(&last.next, lastnext, item) { // try to link item at the end of the linked list
					casqitem(&q.tail, last, item) // enqueue is done; try to swing tail to the inserted node
					atomic.AddUint64(&q.len, 1)
					return
				}
			} else { // tail was not pointing to the last node
				casqitem(&q.tail, last, lastnext) // try to swing tail to the next node
			}
		}
	}
}

// Dequeue removes and returns the value at the head of the queue.
// It returns nil if the queue is empty.
func (q *Queue) Dequeue() interface{} {
	var first, last, firstnext *queueitem
	for {
		first = loadqitem(&q.head)
		last = loadqitem(&q.tail)
		firstnext = loadqitem(&first.next)
		if first == loadqitem(&q.head) { // are head, tail and next consistent?
			if first == last { // is the queue empty, or is tail falling behind?
				if firstnext == nil { // queue is empty, nothing to dequeue
					return nil
				}
				casqitem(&q.tail, last, firstnext) // tail is falling behind, try to advance it
			} else {
				v := firstnext.v // read the value before the CAS, otherwise another dequeue might free the next node
				if casqitem(&q.head, first, firstnext) { // try to swing head to the next node
					atomic.AddUint64(&q.len, ^uint64(0)) // adding ^uint64(0) wraps around, decrementing len by one
					return v // queue was not empty and the dequeue finished
				}
			}
		}
	}
}

// Length returns the number of items currently in the queue.
func (q *Queue) Length() uint64 {
	return atomic.LoadUint64(&q.len)
}

// queueitem is a single node of the queue's internal linked list.
type queueitem struct {
	next unsafe.Pointer
	v    interface{}
}

// loadqitem atomically loads the *queueitem stored at p.
func loadqitem(p *unsafe.Pointer) *queueitem {
	return (*queueitem)(atomic.LoadPointer(p))
}

// casqitem atomically compares-and-swaps the *queueitem stored at p.
func casqitem(p *unsafe.Pointer, old, new *queueitem) bool {
	return atomic.CompareAndSwapPointer(p, unsafe.Pointer(old), unsafe.Pointer(new))
}
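
Below is a minimal usage sketch, not part of the commit itself. It assumes the file above compiles as package lockfree and uses example.com/lockfree as a placeholder import path: one goroutine enqueues a few integers, then the main goroutine drains them in FIFO order.

package main

import (
	"fmt"
	"sync"

	"example.com/lockfree" // placeholder import path for the package above
)

func main() {
	q := lockfree.NewQueue()

	// Enqueue from a separate goroutine to show that no external locking is needed.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 5; i++ {
			q.Enqueue(i)
		}
	}()
	wg.Wait()

	// Drain the queue: values come out in FIFO order, and Dequeue returns nil once it is empty.
	for v := q.Dequeue(); v != nil; v = q.Dequeue() {
		fmt.Println(v) // 0, 1, 2, 3, 4
	}
	fmt.Println("length:", q.Length()) // length: 0
}

Note that because Dequeue signals an empty queue by returning nil, values that are themselves nil cannot be stored and later distinguished from emptiness.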