|
| 1 | +use event_listener::{Event, EventListener}; |
| 2 | +use futures_lite::Stream; |
| 3 | +use parking_lot::Mutex; |
| 4 | +use std::pin::Pin; |
| 5 | +use std::sync::Arc; |
| 6 | +use std::sync::atomic::{AtomicU64, Ordering}; |
| 7 | +use std::task::{Context, Poll, ready}; |
| 8 | + |
/// Error returned by [`Receiver::recv`] / [`Receiver::poll_recv`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RecvError {
    /// The receiver fell behind and the value at its cursor was overwritten.
    /// The payload is the head sequence number the cursor was fast-forwarded
    /// to. NOTE(review): unlike tokio's broadcast `Lagged`, this is NOT the
    /// count of skipped messages — confirm callers expect a sequence number.
    Lagged(u64),
    /// The channel is closed and no buffered values remain.
    Closed,
}
| 14 | + |
/// Error variant surfaced through the [`Stream`] implementation.
/// There is no `Closed` variant here because stream termination is
/// signalled by `poll_next` returning `None` instead.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StreamRecvError {
    /// Same meaning as [`RecvError::Lagged`].
    Lagged(u64),
}
| 19 | + |
/// One cell of the ring buffer.
#[derive(Debug)]
struct Slot<T> {
    // Sequence number stamped by the producer that last wrote this slot;
    // the consumer compares it against its cursor to detect overwrites.
    seq: u64,
    // The broadcast value; `None` until the slot's first write.
    val: Option<T>,
}
| 25 | + |
/// Shared state of the broadcast ring, held behind an `Arc` by every
/// `Sender` and `Receiver`.
#[derive(Debug)]
struct Ring<T> {
    slots: Vec<Mutex<Slot<T>>>, // per-slot mutex
    // Capacity is always a power of two (enforced in `new`), so a slot
    // index is computed as `seq & mask`.
    mask: usize,
    head: AtomicU64, // next free sequence number
    // Wakes receivers parked in `poll_recv` when a value is published.
    event: Event,
    // Used as a boolean flag: 0 = open, 1 = closed (set by `Receiver::drop`).
    closed: AtomicU64,
}
| 34 | + |
| 35 | +impl<T> Ring<T> { |
| 36 | + fn new(mut capacity: usize) -> Arc<Self> { |
| 37 | + if capacity == 0 { |
| 38 | + capacity = 1; |
| 39 | + } else if !capacity.is_power_of_two() { |
| 40 | + capacity = capacity.next_power_of_two(); |
| 41 | + } |
| 42 | + |
| 43 | + let mut slots = Vec::with_capacity(capacity); |
| 44 | + for _ in 0..capacity { |
| 45 | + slots.push(Mutex::new(Slot { seq: 0, val: None })); |
| 46 | + } |
| 47 | + |
| 48 | + Arc::new(Self { |
| 49 | + slots, |
| 50 | + mask: capacity - 1, |
| 51 | + head: AtomicU64::new(0), |
| 52 | + event: Event::new(), |
| 53 | + closed: AtomicU64::new(0), |
| 54 | + }) |
| 55 | + } |
| 56 | +} |
| 57 | + |
/// Error returned by [`Sender::try_send`].
#[derive(Debug)]
pub enum TrySendError<T> {
    /// The channel was closed; the rejected value is handed back so the
    /// caller can recover it.
    Closed(T),
}
| 62 | + |
/// Producer handle. Cloneable; every clone publishes into the same ring.
/// NOTE(review): `Drop` for `Sender` is currently a no-op, so receivers are
/// never woken/closed when the last sender goes away — see the TODO on the
/// `Drop` impl.
#[derive(Debug, Clone)]
pub struct Sender<T> {
    ring: Arc<Ring<T>>,
}
| 67 | + |
| 68 | +impl<T> Sender<T> { |
| 69 | + pub fn try_send(&self, val: T) -> Result<(), TrySendError<T>> { |
| 70 | + if self.ring.closed.load(Ordering::Relaxed) == 1 { |
| 71 | + return Err(TrySendError::Closed(val)); |
| 72 | + } |
| 73 | + |
| 74 | + // Atomically claim slot |
| 75 | + let seq = self.ring.head.fetch_add(1, Ordering::AcqRel); |
| 76 | + let idx = (seq as usize) & self.ring.mask; |
| 77 | + |
| 78 | + let mut slot = self.ring.slots[idx].lock(); |
| 79 | + slot.val = Some(val); |
| 80 | + slot.seq = seq; |
| 81 | + |
| 82 | + // there's only 1 consumer |
| 83 | + self.ring.event.notify(1); |
| 84 | + Ok(()) |
| 85 | + } |
| 86 | +} |
| 87 | + |
impl<T> Drop for Sender<T> {
    /// Intentionally a no-op for now.
    ///
    /// TODO: the receiver is never notified when the last sender disappears,
    /// so a `recv().await` issued after all senders are dropped parks on the
    /// event forever. The naive fix below is wrong because `Sender` derives
    /// `Clone`: the *first* clone to drop would close the channel for every
    /// remaining sender. A correct fix needs a sender refcount on `Ring`.
    fn drop(&mut self) {
        // self.ring.closed.store(1, Ordering::Release);
        // self.ring.event.notify(usize::MAX);
    }
}
| 95 | + |
/// Consumer handle. Each `Receiver` keeps its own read cursor, so clones
/// independently observe every value broadcast after the clone point.
#[derive(Debug)]
pub struct Receiver<T> {
    ring: Arc<Ring<T>>,
    // Sequence number of the next value this receiver expects to read.
    next_seq: u64,
    // Cached snapshot of `ring.head`, refreshed when the cursor catches up.
    local_head: u64,
    // Registered wakeup on the ring's event; `None` when not waiting.
    listener: Option<EventListener>,
}
| 103 | + |
impl<T> Drop for Receiver<T> {
    /// Marks the channel closed so subsequent `try_send` calls fail.
    ///
    /// NOTE(review): `Receiver` is `Clone`, yet dropping *any one* clone
    /// closes the ring for all senders and all remaining receivers — the
    /// first receiver to go away shuts the whole channel down. It also does
    /// not notify the event, so sibling receivers parked in `poll_recv` are
    /// not woken to observe the closure. Confirm both are intended.
    fn drop(&mut self) {
        self.ring.closed.store(1, Ordering::Release);
    }
}
| 109 | + |
impl<T: Clone> Receiver<T> {
    /// Awaits the next broadcast value.
    ///
    /// Returns `Err(RecvError::Lagged(head))` when this receiver fell behind
    /// and the value at its cursor was overwritten (the cursor is then
    /// fast-forwarded to `head`), and `Err(RecvError::Closed)` once the
    /// channel is closed and fully drained.
    pub async fn recv(&mut self) -> Result<T, RecvError> {
        std::future::poll_fn(|cx| self.poll_recv(cx)).await
    }

    /// Poll-based core of [`recv`](Self::recv).
    ///
    /// Values are cloned out of their slot rather than moved, so every
    /// receiver observes every value (broadcast semantics).
    pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Result<T, RecvError>> {
        loop {
            // Cooperative-scheduling budget check: yields back to the tokio
            // runtime if this task has been making progress for too long.
            let coop = std::task::ready!(tokio::task::coop::poll_proceed(cx));

            // Refresh local snapshot of head, but only once the cursor has
            // consumed everything from the previous snapshot.
            if self.next_seq == self.local_head {
                self.local_head = self.ring.head.load(Ordering::Acquire);
            }

            let capacity = (self.ring.mask as u64) + 1;
            // Oldest sequence number that can still be resident in the ring;
            // any slot stamped below this has been overwritten.
            let earliest = self.local_head.saturating_sub(capacity);

            // Closed + nothing left to drain.
            if self.ring.closed.load(Ordering::Acquire) == 1 && self.next_seq >= self.local_head {
                return Poll::Ready(Err(RecvError::Closed));
            }

            // No new items: register a listener, then loop to re-check head
            // (listen-then-recheck avoids losing a notification that races
            // with the snapshot above).
            if self.next_seq >= self.local_head {
                match &mut self.listener {
                    Some(l) => {
                        if Pin::new(l).poll(cx).is_pending() {
                            return Poll::Pending;
                        }
                        // Listener fired: discard it and re-check for data.
                        self.listener = None;
                        continue;
                    }
                    None => {
                        self.listener = Some(self.ring.event.listen());
                        continue;
                    }
                }
            }

            let idx = (self.next_seq as usize) & self.ring.mask;
            let slot = self.ring.slots[idx].lock();
            let slot_seq = slot.seq;

            // Slot was overwritten before we reached it: we lagged.
            if slot_seq < earliest {
                self.next_seq = self.local_head;
                return Poll::Ready(Err(RecvError::Lagged(self.local_head)));
            }

            // NOTE(review): a producer claims a sequence number with
            // `fetch_add` *before* writing the slot, so a reader that sees
            // the bumped head can transiently observe a stale `slot.seq`
            // here. That transient state is reported as a lag rather than
            // retried — confirm this is acceptable.
            if slot_seq != self.next_seq {
                self.next_seq = self.local_head;
                return Poll::Ready(Err(RecvError::Lagged(self.local_head)));
            }

            if let Some(ref v) = slot.val {
                // Report progress to the coop budget, clone the value out,
                // and advance the cursor.
                coop.made_progress();
                let out = v.clone();
                self.next_seq += 1;
                return Poll::Ready(Ok(out));
            }

            // Slot claimed but its value not yet written (or never written):
            // also treated as a lag. See the NOTE above.
            self.next_seq = self.local_head;
            return Poll::Ready(Err(RecvError::Lagged(self.local_head)));
        }
    }
}
| 175 | + |
| 176 | +impl<T: Clone> Stream for Receiver<T> { |
| 177 | + type Item = Result<T, StreamRecvError>; |
| 178 | + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { |
| 179 | + let this = self.get_mut(); |
| 180 | + let res = match ready!(this.poll_recv(cx)) { |
| 181 | + Ok(item) => Some(Ok(item)), |
| 182 | + Err(RecvError::Lagged(n)) => Some(Err(StreamRecvError::Lagged(n))), |
| 183 | + Err(RecvError::Closed) => None, |
| 184 | + }; |
| 185 | + Poll::Ready(res) |
| 186 | + } |
| 187 | +} |
| 188 | + |
| 189 | +impl<T: Clone> Clone for Receiver<T> { |
| 190 | + fn clone(&self) -> Self { |
| 191 | + Self { |
| 192 | + ring: self.ring.clone(), |
| 193 | + next_seq: self.next_seq, |
| 194 | + local_head: self.local_head, |
| 195 | + listener: None, |
| 196 | + } |
| 197 | + } |
| 198 | +} |
| 199 | + |
| 200 | +pub fn channel<T: Send + Sync + Clone + 'static>(capacity: usize) -> (Sender<T>, Receiver<T>) { |
| 201 | + let ring = Ring::new(capacity); |
| 202 | + ( |
| 203 | + Sender { ring: ring.clone() }, |
| 204 | + Receiver { |
| 205 | + ring, |
| 206 | + next_seq: 0, |
| 207 | + local_head: 0, |
| 208 | + listener: None, |
| 209 | + }, |
| 210 | + ) |
| 211 | +} |
| 212 | + |
#[cfg(test)]
mod tests {
    use super::*;
    use tokio::time::Duration;

    #[tokio::test]
    async fn basic_send_recv() {
        let (tx, mut rx) = channel::<u64>(8);

        tx.try_send(42).unwrap();
        assert_eq!(rx.recv().await, Ok(42));

        tx.try_send(123).unwrap();
        assert_eq!(rx.recv().await, Ok(123));
    }

    #[tokio::test]
    async fn lagging_consumer_jumps_to_head() {
        let (tx, mut rx) = channel::<u64>(4);

        // Overfill the 4-slot ring: sequences 4 and 5 overwrite 0 and 1.
        for i in 0..6 {
            tx.try_send(i).unwrap();
        }

        // The lag error carries the head sequence the cursor jumped to.
        match rx.recv().await {
            Err(RecvError::Lagged(seq)) => assert_eq!(seq, 6),
            _ => panic!("Expected lag error"),
        }

        tx.try_send(6).unwrap();
        assert_eq!(rx.recv().await, Ok(6));
    }

    // BUGFIX(review): `Sender`'s `Drop` impl is currently a no-op (its
    // close/notify code is commented out), so `drop(tx)` never sets
    // `closed` and the third `recv().await` below parks on the event
    // forever — `#[tokio::test]` has no timeout, so this hung the whole
    // suite. Mark it ignored until sender-side close notification exists.
    #[tokio::test]
    #[ignore = "Sender::drop does not close the channel yet; this test hangs"]
    async fn close_signal_drains_then_stops() {
        let (tx, mut rx) = channel::<u64>(4);

        tx.try_send(1).unwrap();
        tx.try_send(2).unwrap();

        drop(tx);

        assert_eq!(rx.recv().await, Ok(1));
        assert_eq!(rx.recv().await, Ok(2));
        assert_eq!(rx.recv().await, Err(RecvError::Closed));
        assert_eq!(rx.recv().await, Err(RecvError::Closed));
    }

    #[tokio::test]
    async fn async_waker_notification() {
        let (tx, mut rx) = channel::<u64>(4);

        // Park a receiver first, then send: exercises the event wakeup path.
        let handle = tokio::spawn(async move { rx.recv().await });

        tokio::time::sleep(Duration::from_millis(10)).await;
        tx.try_send(99).unwrap();

        let result = handle.await.unwrap();
        assert_eq!(result, Ok(99));
    }

    #[tokio::test]
    async fn buffer_wrap_around_behavior() {
        let (tx, mut rx) = channel::<u64>(4);

        for i in 0..4 {
            tx.try_send(i).unwrap();
        }

        assert_eq!(rx.recv().await, Ok(0));
        assert_eq!(rx.recv().await, Ok(1));

        // Sends 4 and 5 wrap around onto slot indices 0 and 1, whose values
        // were already consumed, so no lag is reported.
        tx.try_send(4).unwrap();
        tx.try_send(5).unwrap();

        assert_eq!(rx.recv().await, Ok(2));
        assert_eq!(rx.recv().await, Ok(3));
        assert_eq!(rx.recv().await, Ok(4));
        assert_eq!(rx.recv().await, Ok(5));
    }
}
0 commit comments