forked from hyperium/hyper
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathready_stream.rs
More file actions
249 lines (225 loc) · 8.44 KB
/
ready_stream.rs
File metadata and controls
249 lines (225 loc) · 8.44 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
use http_body_util::StreamBody;
use hyper::body::Bytes;
use hyper::body::Frame;
use hyper::rt::{Read, ReadBufCursor, Write};
use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Response, StatusCode};
use pin_project_lite::pin_project;
use std::convert::Infallible;
use std::io;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
use tokio::sync::mpsc;
use tracing::{error, info};
pin_project! {
    /// An in-memory, channel-backed transport used to drive hyper in tests.
    ///
    /// Bytes written by the peer arrive on `read_rx`; bytes written to this
    /// stream are forwarded to `write_tx`. The remaining fields implement the
    /// deliberate flush/back-pressure dance in the `Write` impl below — this
    /// is test-harness behavior, not a general-purpose transport.
    #[derive(Debug)]
    pub struct TxReadyStream {
        // Incoming chunks from the peer stream.
        #[pin]
        read_rx: mpsc::UnboundedReceiver<Vec<u8>>,
        // Outgoing chunks to the peer stream.
        write_tx: mpsc::UnboundedSender<Vec<u8>>,
        // Spill buffer: channel data that did not fit in the caller's read
        // buffer is kept here and served on the next poll_read.
        read_buffer: Vec<u8>,
        // Gate for poll_write: only true after poll_flush has completed since
        // the last successful write; while false, poll_write returns Pending.
        poll_since_write: bool,
        // Count of poll_flush calls; drives the "two flushes per chunk"
        // simulation in poll_flush.
        flush_count: usize,
        // Background task spawned while a flush is pending; acts as a
        // stand-in for a stored waker so the runtime stays alive. Aborted
        // once a flush succeeds.
        panic_task: Option<tokio::task::JoinHandle<()>>,
    }
}
impl TxReadyStream {
    /// Assemble a stream from its two channel halves, with all bookkeeping
    /// fields in their initial state (flush gate open, no spilled bytes).
    fn new(
        read_rx: mpsc::UnboundedReceiver<Vec<u8>>,
        write_tx: mpsc::UnboundedSender<Vec<u8>>,
    ) -> Self {
        Self {
            read_rx,
            write_tx,
            read_buffer: Vec::new(),
            poll_since_write: true,
            flush_count: 0,
            panic_task: None,
        }
    }

    /// Create a new pair of connected ReadyStreams. Returns two streams that are connected to each other.
    fn new_pair() -> (Self, Self) {
        let (a_to_b_tx, a_to_b_rx) = mpsc::unbounded_channel();
        let (b_to_a_tx, b_to_a_rx) = mpsc::unbounded_channel();
        (
            // First stream writes on the A->B channel and reads from B->A.
            Self::new(b_to_a_rx, a_to_b_tx),
            // Second stream is wired the opposite way around.
            Self::new(a_to_b_rx, b_to_a_tx),
        )
    }

    /// Send data to the other end of the stream (this will be available for reading on the other stream)
    fn send(&self, data: &[u8]) -> Result<(), mpsc::error::SendError<Vec<u8>>> {
        self.write_tx.send(data.to_vec())
    }

    /// Receive data written to this stream by the other end (async)
    async fn recv(&mut self) -> Option<Vec<u8>> {
        self.read_rx.recv().await
    }
}
impl Read for TxReadyStream {
    /// Fill `buf` first from the internal spill buffer, then from the channel.
    ///
    /// Returns `Ready(Ok(()))` with zero bytes written once the channel is
    /// closed and drained, which hyper interprets as EOF.
    ///
    /// Fix: the original matched `try_recv()` and then repeated the entire
    /// copy-and-spill logic inside the `Empty => poll_recv(..)` arm.
    /// `poll_recv` alone covers all three cases — data ready, nothing yet
    /// (registers the waker and yields Pending via `ready!`), and closed
    /// (`None`) — so the duplicated branch is removed with identical behavior.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        mut buf: ReadBufCursor<'_>,
    ) -> Poll<io::Result<()>> {
        let this = self.as_mut().project();

        // Serve previously spilled bytes before touching the channel.
        if !this.read_buffer.is_empty() {
            let to_read = std::cmp::min(this.read_buffer.len(), buf.remaining());
            buf.put_slice(&this.read_buffer[..to_read]);
            // Drop the consumed prefix; the rest stays for the next call.
            this.read_buffer.drain(..to_read);
            return Poll::Ready(Ok(()));
        }

        match ready!(this.read_rx.poll_recv(cx)) {
            Some(data) => {
                // Copy as much as fits into the caller's buffer.
                let to_read = std::cmp::min(data.len(), buf.remaining());
                buf.put_slice(&data[..to_read]);
                // Spill any overflow so no bytes are lost between polls.
                if to_read < data.len() {
                    this.read_buffer.extend_from_slice(&data[to_read..]);
                }
                Poll::Ready(Ok(()))
            }
            // Channel closed and fully drained: report EOF.
            None => Poll::Ready(Ok(())),
        }
    }
}
impl Write for TxReadyStream {
    /// NOTE(review): this Write impl deliberately deviates from a
    /// well-behaved transport in order to exercise hyper's write/flush poll
    /// loop — presumably as a bug reproduction. Specifically:
    /// - `poll_write` returns `Pending` WITHOUT registering the waker unless
    ///   a flush has completed since the last successful write;
    /// - `poll_flush` only succeeds on every second call until all chunks
    ///   have been written.
    /// Progress therefore depends on hyper re-polling on its own.
    fn poll_write(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        // Back-pressure simulation: refuse to accept bytes until poll_flush
        // has run to completion since the previous write. Note the waker is
        // intentionally not stored here.
        if !self.poll_since_write {
            return Poll::Pending;
        }
        // Close the gate again; it reopens at the end of a successful flush.
        self.poll_since_write = false;
        let this = self.project();
        // Own the caller's bytes so they can travel through the channel.
        let buf = Vec::from(&buf[..buf.len()]);
        let len = buf.len();
        // Send data through the channel - this should always be ready for unbounded channels
        match this.write_tx.send(buf) {
            Ok(_) => {
                // Report the full buffer as written.
                Poll::Ready(Ok(len))
            }
            Err(_) => {
                // Receiver dropped: surface as a broken pipe, matching what a
                // real closed connection would look like.
                error!("ReadyStream::poll_write failed - channel closed");
                Poll::Ready(Err(io::Error::new(
                    io::ErrorKind::BrokenPipe,
                    "Write channel closed",
                )))
            }
        }
    }

    fn poll_flush(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.flush_count += 1;
        // We require two flushes to complete each chunk, simulating a success at the end of the old
        // poll loop. After all chunks are written, we always succeed on flush to allow for finish.
        if self.flush_count % 2 != 0 && self.flush_count < TOTAL_CHUNKS * 2 {
            // Spawn panic task if not already spawned
            if self.panic_task.is_none() {
                // The sleeping task keeps the runtime busy while we return
                // Pending without a waker; it is aborted on flush success.
                let task = tokio::spawn(async {
                    tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
                });
                self.panic_task = Some(task);
            }
            return Poll::Pending;
        }
        // Abort the panic task if it exists
        if let Some(task) = self.panic_task.take() {
            info!("Task polled to completion. Aborting panic (aka waker stand-in task).");
            task.abort();
        }
        // Reopen the write gate: poll_write may succeed again.
        self.poll_since_write = true;
        Poll::Ready(Ok(()))
    }

    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        // Nothing to tear down; channels close when the halves are dropped.
        Poll::Ready(Ok(()))
    }
}
/// Install the global tracing subscriber exactly once, regardless of how
/// many tests invoke this (a second `init()` would panic).
fn init_tracing() {
    static TRACING_INIT: std::sync::Once = std::sync::Once::new();
    TRACING_INIT.call_once(|| {
        tracing_subscriber::fmt()
            .with_max_level(tracing::Level::INFO)
            .with_target(true)
            .with_thread_ids(true)
            .with_thread_names(true)
            .init();
    });
}
// Number of body chunks the test server streams; also drives the flush
// gating in `poll_flush` (which requires two flushes per chunk).
const TOTAL_CHUNKS: usize = 16;
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn body_test() {
    init_tracing();

    // Size of each streamed body chunk; also used as hyper's max buffer size.
    // (Hoisted above its first use — the original declared it after
    // `max_buf_size(CHUNK_SIZE)`, which is legal for items but confusing.)
    const CHUNK_SIZE: usize = 64 * 1024;

    // One in-memory duplex pair: hyper serves on one end, the test drives raw
    // HTTP/1.1 bytes on the other.
    let (server_stream, mut client_stream) = TxReadyStream::new_pair();

    let mut http_builder = http1::Builder::new();
    http_builder.max_buf_size(CHUNK_SIZE);

    // Service that streams TOTAL_CHUNKS identical zero-filled chunks with an
    // explicit content-length.
    let service = service_fn(|_| async move {
        info!(
            "Creating payload of {} chunks of {} KiB each ({} MiB total)...",
            TOTAL_CHUNKS,
            CHUNK_SIZE / 1024,
            TOTAL_CHUNKS * CHUNK_SIZE / (1024 * 1024)
        );
        let bytes = Bytes::from(vec![0; CHUNK_SIZE]);
        let data = vec![bytes.clone(); TOTAL_CHUNKS];
        let stream = futures_util::stream::iter(
            data.into_iter()
                .map(|b| Ok::<_, Infallible>(Frame::data(b))),
        );
        let body = StreamBody::new(stream);
        info!("Server: Sending data response...");
        Ok::<_, hyper::Error>(
            Response::builder()
                .status(StatusCode::OK)
                .header("content-type", "application/octet-stream")
                .header("content-length", (TOTAL_CHUNKS * CHUNK_SIZE).to_string())
                .body(body)
                .unwrap(),
        )
    });

    let server_task = tokio::spawn(async move {
        let conn = http_builder.serve_connection(server_stream, service);
        if let Err(e) = conn.await {
            error!("Server connection error: {}", e);
        }
    });

    // Hand-written request; Connection: close makes the server drop its end
    // after the response, which terminates the recv loop below.
    let get_request = "GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n";
    client_stream.send(get_request.as_bytes()).unwrap();

    info!("Client is reading response...");
    let mut bytes_received = 0;
    while let Some(chunk) = client_stream.recv().await {
        bytes_received += chunk.len();
    }

    // Clean up
    server_task.abort();
    info!(bytes_received, "Client done receiving bytes");

    // Fix: the original test made no assertion at all. The raw bytes include
    // the status line and headers, so a completed response must carry at
    // least the full body length.
    assert!(
        bytes_received >= TOTAL_CHUNKS * CHUNK_SIZE,
        "response truncated: got {} bytes, expected at least {}",
        bytes_received,
        TOTAL_CHUNKS * CHUNK_SIZE
    );
}