|
1 | 1 | use std::net::SocketAddr; |
2 | 2 |
|
| 3 | +use alloy_primitives::{Address, B256}; |
3 | 4 | use futures_util::stream::SplitSink; |
4 | 5 | use futures_util::{stream::SplitStream, SinkExt, StreamExt}; |
| 6 | +use monad_exec_events::ExecEvent; |
5 | 7 | use tokio::net::{TcpListener, TcpStream}; |
6 | 8 | use tokio::sync::broadcast; |
7 | 9 | use tokio_tungstenite::{accept_async, tungstenite::Message, WebSocketStream}; |
8 | 10 | use tracing::{error, info, warn}; |
| 11 | +use serde::{Deserialize, Serialize}; |
| 12 | + |
| 13 | +use crate::event_listener::EventName; |
| 14 | +use crate::top_k_tracker::{AccessEntry, TopKTracker}; |
9 | 15 |
|
10 | 16 | use super::event_filter::{ClientMessage, EventFilter}; |
11 | 17 | use super::event_listener::EventData; |
12 | 18 | use super::serializable_event::SerializableEventData; |
13 | 19 |
|
/// Snapshot of the most frequently accessed accounts and storage slots,
/// produced periodically by the event forwarder task and broadcast to
/// websocket clients as a [`ServerMessage::TopAccesses`] payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TopAccessesData {
    /// Top-K account accesses, keyed by account address.
    pub account: Vec<AccessEntry<Address>>,
    /// Top-K storage accesses, keyed by (account address, storage key).
    pub storage: Vec<AccessEntry<(Address, B256)>>,
}
| 25 | + |
/// Item carried on the internal broadcast channel from the forwarder task to
/// each connected client's write task: either a single execution event or a
/// periodic top-accesses statistics snapshot.
#[derive(Debug, Clone)]
pub enum EventDataOrAccesses {
    /// A single execution event to be filtered and batched per client.
    Event(EventData),
    /// A periodic top-accesses snapshot, forwarded to every client unfiltered.
    TopAccesses(TopAccessesData),
}
| 31 | + |
/// Wire format for messages sent to websocket clients, serialized as JSON.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ServerMessage {
    /// A batch of execution events that passed the client's subscription filter.
    Events(Vec<SerializableEventData>),
    /// A top-accesses statistics snapshot.
    TopAccesses(TopAccessesData),
}
| 37 | + |
14 | 38 | /// Wait for the client to send a subscription message, with a timeout. |
15 | 39 | /// Returns the filter, or None if the client disconnects or times out. |
16 | 40 | async fn wait_for_subscription( |
@@ -58,49 +82,65 @@ async fn wait_for_subscription( |
58 | 82 | } |
59 | 83 |
|
60 | 84 | async fn client_write_task( |
61 | | - mut event_broadcast_receiver: broadcast::Receiver<EventData>, |
| 85 | + mut event_broadcast_receiver: broadcast::Receiver<EventDataOrAccesses>, |
62 | 86 | filter: EventFilter, |
63 | 87 | addr: SocketAddr, |
64 | 88 | mut ws_sender: SplitSink<WebSocketStream<TcpStream>, Message>, |
65 | 89 | ) { |
66 | | - let mut buf: Vec<SerializableEventData> = Vec::new(); |
| 90 | + let mut events_buf: Vec<SerializableEventData> = Vec::new(); |
| 91 | + let mut accesses_buf: Vec<TopAccessesData> = Vec::new(); |
67 | 92 |
|
68 | 93 | loop { |
69 | 94 | let result = event_broadcast_receiver.recv().await; |
70 | | - match result { |
71 | | - Ok(event_data) => { |
| 95 | + if result.is_err() { |
| 96 | + error!("Broadcast channel error for {}: {}", addr, result.err().unwrap()); |
| 97 | + break; |
| 98 | + } |
| 99 | + let event = result.unwrap(); |
| 100 | + match event { |
| 101 | + EventDataOrAccesses::Event(event_data) => { |
72 | 102 | if filter.matches(&event_data.event_name) { |
73 | | - buf.push(SerializableEventData::from(&event_data)); |
| 103 | + events_buf.push(SerializableEventData::from(&event_data)); |
74 | 104 | } |
75 | 105 | } |
76 | | - Err(e) => { |
77 | | - error!("Broadcast channel error for {}: {}", addr, e); |
78 | | - break; |
| 106 | + EventDataOrAccesses::TopAccesses(top_accesses_data) => { |
| 107 | + accesses_buf.push(top_accesses_data); |
79 | 108 | } |
80 | 109 | } |
81 | | - while let Ok(event_data) = event_broadcast_receiver.try_recv() { |
82 | | - if filter.matches(&event_data.event_name) { |
83 | | - buf.push(SerializableEventData::from(&event_data)); |
| 110 | + while let Ok(event) = event_broadcast_receiver.try_recv() { |
| 111 | + match event { |
| 112 | + EventDataOrAccesses::Event(event_data) => { |
| 113 | + if filter.matches(&event_data.event_name) { |
| 114 | + events_buf.push(SerializableEventData::from(&event_data)); |
| 115 | + } |
| 116 | + } |
| 117 | + EventDataOrAccesses::TopAccesses(top_accesses_data) => { |
| 118 | + accesses_buf.push(top_accesses_data); |
| 119 | + } |
84 | 120 | } |
85 | 121 | } |
86 | 122 |
|
87 | | - if !buf.is_empty() { |
| 123 | + if !events_buf.is_empty() { |
| 124 | + let server_msg = ServerMessage::Events(std::mem::take(&mut events_buf)); |
88 | 125 | // Serialize batch to JSON |
89 | | - let json_message = match serde_json::to_string(&std::mem::take(&mut buf)) { |
90 | | - Ok(json) => json, |
91 | | - Err(e) => { |
92 | | - error!("Failed to serialize batch for {}: {}", addr, e); |
93 | | - buf.clear(); |
94 | | - continue; |
95 | | - } |
96 | | - }; |
| 126 | + let json_message = serde_json::to_string(&server_msg).unwrap(); |
97 | 127 |
|
98 | 128 | // Send batch |
99 | 129 | if let Err(e) = ws_sender.send(Message::Text(json_message)).await { |
100 | 130 | error!("Failed to send batch to {}: {}", addr, e); |
101 | 131 | break; |
102 | 132 | } |
103 | 133 | } |
| 134 | + if !accesses_buf.is_empty() { |
| 135 | + for accesses in std::mem::take(&mut accesses_buf) { |
| 136 | + let server_msg = ServerMessage::TopAccesses(accesses); |
| 137 | + let json_message = serde_json::to_string(&server_msg).unwrap(); |
| 138 | + if let Err(e) = ws_sender.send(Message::Text(json_message)).await { |
| 139 | + error!("Failed to send batch to {}: {}", addr, e); |
| 140 | + break; |
| 141 | + } |
| 142 | + } |
| 143 | + } |
104 | 144 | } |
105 | 145 | } |
106 | 146 |
|
@@ -131,10 +171,63 @@ async fn client_read_task( |
131 | 171 | } |
132 | 172 | } |
133 | 173 |
|
| 174 | +async fn run_event_forwarder_task( |
| 175 | + mut event_receiver: tokio::sync::mpsc::Receiver<EventData>, |
| 176 | + event_broadcast_sender: broadcast::Sender<EventDataOrAccesses>, |
| 177 | +) { |
| 178 | + let mut account_accesses = TopKTracker::new(10_000); |
| 179 | + let mut storage_accesses = TopKTracker::new(10_000); |
| 180 | + let mut stats_interval = tokio::time::interval(std::time::Duration::from_secs(5)); |
| 181 | + |
| 182 | + loop { |
| 183 | + tokio::select! { |
| 184 | + event_data = event_receiver.recv() => { |
| 185 | + if event_data.is_none() { |
| 186 | + warn!("Event receiver closed"); |
| 187 | + return; |
| 188 | + } |
| 189 | + let event_data = event_data.unwrap(); |
| 190 | + |
| 191 | + if let EventName::AccountAccess = event_data.event_name { |
| 192 | + if let ExecEvent::AccountAccess { |
| 193 | + account_access, |
| 194 | + .. |
| 195 | + } = event_data.payload { |
| 196 | + let address = Address::from_slice(&account_access.address.bytes); |
| 197 | + account_accesses.record(address); |
| 198 | + } else { |
| 199 | + unreachable!(); |
| 200 | + } |
| 201 | + } else if let EventName::StorageAccess = event_data.event_name { |
| 202 | + if let ExecEvent::StorageAccess { |
| 203 | + storage_access, |
| 204 | + .. |
| 205 | + } = event_data.payload { |
| 206 | + let address = Address::from_slice(&storage_access.address.bytes); |
| 207 | + let key = B256::from_slice(&storage_access.key.bytes); |
| 208 | + storage_accesses.record((address, key)); |
| 209 | + } else { |
| 210 | + unreachable!(); |
| 211 | + } |
| 212 | + } |
| 213 | + |
| 214 | + let _ = event_broadcast_sender.send(EventDataOrAccesses::Event(event_data)); |
| 215 | + }, |
| 216 | + _ = stats_interval.tick() => { |
| 217 | + let top_accesses_data = TopAccessesData { |
| 218 | + account: account_accesses.top_k(10), |
| 219 | + storage: storage_accesses.top_k(10), |
| 220 | + }; |
| 221 | + let _ = event_broadcast_sender.send(EventDataOrAccesses::TopAccesses(top_accesses_data)); |
| 222 | + } |
| 223 | + } |
| 224 | + } |
| 225 | +} |
| 226 | + |
134 | 227 | async fn handle_connection( |
135 | 228 | stream: TcpStream, |
136 | 229 | addr: SocketAddr, |
137 | | - event_broadcast_receiver: broadcast::Receiver<EventData>, |
| 230 | + event_broadcast_receiver: broadcast::Receiver<EventDataOrAccesses>, |
138 | 231 | ) { |
139 | 232 | info!("New WebSocket connection from: {}", addr); |
140 | 233 |
|
@@ -186,23 +279,17 @@ async fn handle_connection( |
186 | 279 |
|
187 | 280 | pub async fn run_websocket_server( |
188 | 281 | addr: SocketAddr, |
189 | | - mut event_receiver: tokio::sync::mpsc::Receiver<EventData>, |
| 282 | + event_receiver: tokio::sync::mpsc::Receiver<EventData>, |
190 | 283 | ) -> Result<(), Box<dyn std::error::Error>> { |
191 | 284 | // Create a broadcast channel for distributing events to all clients |
192 | | - let (event_broadcast_sender, _) = broadcast::channel::<EventData>(1_000_000); |
| 285 | + let (event_broadcast_sender, _) = broadcast::channel::<EventDataOrAccesses>(1_000_000); |
193 | 286 |
|
194 | 287 | // Spawn a task to forward events from the mpsc channel to the broadcast channel |
195 | 288 | let event_broadcast_sender_clone = event_broadcast_sender.clone(); |
196 | | - let broadcast_task = tokio::spawn(async move { |
197 | | - loop { |
198 | | - let event_data = event_receiver.recv().await; |
199 | | - if event_data.is_none() { |
200 | | - warn!("Event receiver closed"); |
201 | | - return; |
202 | | - } |
203 | | - let _ = event_broadcast_sender_clone.send(event_data.unwrap()); |
204 | | - } |
205 | | - }); |
| 289 | + let broadcast_task = tokio::spawn(run_event_forwarder_task( |
| 290 | + event_receiver, |
| 291 | + event_broadcast_sender_clone, |
| 292 | + )); |
206 | 293 |
|
207 | 294 | // Bind the TCP listener |
208 | 295 | let listener = TcpListener::bind(&addr).await?; |
|
0 commit comments