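//! Request executors: scheduling strategies that drive virtual users (VUs)
//! against a `TextGenerationBackend`. `ConstantVUsExecutor` keeps a fixed
//! number of VUs busy for the whole run, while `ConstantArrivalRateExecutor`
//! spawns VUs at a target requests-per-second rate, capped at `max_vus`.
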
use std::sync::atomic::AtomicI64;
use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;
use log::{info, trace, warn};
use serde::Serialize;
use tokio::sync::mpsc::{Receiver, Sender, UnboundedSender};
use tokio::sync::{broadcast, Mutex};
use tokio::task::JoinHandle;

use crate::requests::{
    TextGenerationAggregatedResponse, TextGenerationBackend, TextGenerationRequest,
    TextRequestGenerator,
};

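/// Parameters shared by all executors. `duration` is serialized as whole
/// seconds under the key `duration_secs` (via `serde_with`); `rate` is the
/// target requests per second and is `None` for constant-VUs runs.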
#[serde_with::serde_as]
#[derive(Clone, Serialize)]
pub struct ExecutorConfig {
    pub max_vus: u64,
    #[serde(rename = "duration_secs")]
    #[serde_as(as = "serde_with::DurationSeconds<u64>")]
    pub duration: Duration,
    pub rate: Option<f64>,
}

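/// A load-generation strategy: pulls requests from the shared generator, runs
/// them against the backend, and forwards aggregated responses on
/// `responses_tx` until the configured duration elapses or a stop signal is
/// broadcast on `stop_sender`.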
#[async_trait]
pub trait Executor {
    async fn run(
        &self,
        requests: Arc<Mutex<dyn TextRequestGenerator + Send>>,
        responses_tx: UnboundedSender<TextGenerationAggregatedResponse>,
        stop_sender: broadcast::Sender<()>,
    );
}

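/// Keeps exactly `max_vus` virtual users in flight: each time a VU finishes,
/// a new one is started with a fresh request, until `duration` has elapsed.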
pub struct ConstantVUsExecutor {
    config: ExecutorConfig,
    backend: Box<dyn TextGenerationBackend + Send + Sync>,
}

impl ConstantVUsExecutor {
    pub fn new(
        backend: Box<dyn TextGenerationBackend + Send + Sync>,
        max_vus: u64,
        duration: Duration,
    ) -> ConstantVUsExecutor {
        Self {
            backend,
            config: ExecutorConfig {
                max_vus,
                duration,
                rate: None,
            },
        }
    }
}

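// A minimal usage sketch, assuming `backend` (a `Box<dyn TextGenerationBackend
// + Send + Sync>`) and `requests` (an `Arc<Mutex<dyn TextRequestGenerator +
// Send>>`) have been constructed elsewhere; both names are placeholders:
//
//     let executor = ConstantVUsExecutor::new(backend, 16, Duration::from_secs(60));
//     let (responses_tx, mut responses_rx) = tokio::sync::mpsc::unbounded_channel();
//     let (stop_tx, _) = tokio::sync::broadcast::channel(1);
//     executor.run(requests, responses_tx, stop_tx).await;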
#[async_trait]
impl Executor for ConstantVUsExecutor {
    async fn run(
        &self,
        requests: Arc<Mutex<dyn TextRequestGenerator + Send>>,
        responses_tx: UnboundedSender<TextGenerationAggregatedResponse>,
        stop_sender: broadcast::Sender<()>,
    ) {
        let start = std::time::Instant::now();
        // channel used by VUs to signal that they have finished
        let (end_tx, mut end_rx): (Sender<bool>, Receiver<bool>) =
            tokio::sync::mpsc::channel(self.config.max_vus as usize);
        let active_vus = Arc::new(AtomicI64::new(0));
        // broadcast channel used to time VUs out at the end of the run
        let (timeout_tx, _) = tokio::sync::broadcast::channel::<()>(self.config.max_vus as usize);
        // sleep for the configured duration, then send the timeout signal
        {
            let timeout_duration = self.config.duration;
            let timeout_tx = timeout_tx.clone();
            tokio::spawn(async move {
                tokio::time::sleep(timeout_duration).await;
                // send timeout signal to all VUs
                let _ = timeout_tx.send(());
            });
        }
        // start all VUs up-front
        for _ in 0..self.config.max_vus {
            let mut requests_guard = requests.lock().await;
            let request = Arc::from(requests_guard.generate_request());
            drop(requests_guard);
            start_vu(
                self.backend.clone(),
                request,
                responses_tx.clone(),
                end_tx.clone(),
                stop_sender.clone(),
                Some(timeout_tx.subscribe()),
            )
            .await;
            active_vus.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
        }
        let mut stop_receiver = stop_sender.subscribe();
        tokio::select! {
            _ = stop_receiver.recv() => {
                return;
            },
            _ = async {
                // replenish VUs as they finish
                while end_rx.recv().await.is_some() {
                    active_vus.fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
                    if start.elapsed() > self.config.duration {
                        // signal that the VU work is done
                        let _ = responses_tx.send(TextGenerationAggregatedResponse::new_as_ended());
                        info!("Duration reached, waiting for all VUs to finish...");
                        if active_vus.load(std::sync::atomic::Ordering::SeqCst) == 0 {
                            break;
                        }
                    } else {
                        let mut requests_guard = requests.lock().await;
                        let request = Arc::from(requests_guard.generate_request());
                        drop(requests_guard);
                        active_vus.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                        start_vu(
                            self.backend.clone(),
                            request,
                            responses_tx.clone(),
                            end_tx.clone(),
                            stop_sender.clone(),
                            Some(timeout_tx.subscribe()),
                        )
                        .await;
                    }
                }
            } => {}
        }
    }
}

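/// Spawns a single VU task that runs one request to completion and then
/// reports on `end_tx`. The task ends early if a stop signal is broadcast or
/// if the optional timeout receiver fires (used by `ConstantVUsExecutor` to
/// cut VUs off at the end of the run).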
async fn start_vu(
    backend: Box<dyn TextGenerationBackend + Send + Sync>,
    request: Arc<TextGenerationRequest>,
    responses_tx: UnboundedSender<TextGenerationAggregatedResponse>,
    end_tx: Sender<bool>,
    stop_sender: broadcast::Sender<()>,
    mut timeout_stop_receiver: Option<broadcast::Receiver<()>>,
) -> JoinHandle<()> {
    let mut stop_receiver = stop_sender.subscribe();
    tokio::spawn(async move {
        tokio::select! {
            _ = stop_receiver.recv() => {
                let _ = end_tx.send(true).await;
            },
            _ = async {
                if let Some(ref mut receiver) = timeout_stop_receiver {
                    let _ = receiver.recv().await;
                } else {
                    // wait forever if no timeout receiver is provided
                    std::future::pending::<()>().await;
                }
            } => {
                let _ = end_tx.send(true).await;
            },
            _ = async {
                let (tx, mut rx): (
                    Sender<TextGenerationAggregatedResponse>,
                    Receiver<TextGenerationAggregatedResponse>,
                ) = tokio::sync::mpsc::channel(1);
                trace!("VU started with request: {:?}", request);
                let req_thread = tokio::spawn(async move {
                    backend.generate(request.clone(), tx).await;
                });
                let send_thread = tokio::spawn(async move {
                    while let Some(response) = rx.recv().await {
                        // ignore send errors: if the receiver is gone we still finish
                        // the request, to leave the remote server in a clean state
                        let _ = responses_tx.send(response);
                    }
                });
                req_thread.await.unwrap();
                send_thread.await.unwrap();
                // signal that the VU work is done
                let _ = end_tx.send(true).await;
            } => {}
        }
    })
}

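/// Starts new VUs at a fixed target arrival rate (requests per second) rather
/// than keeping a fixed pool busy, capped at `max_vus` concurrent VUs.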
pub struct ConstantArrivalRateExecutor {
    config: ExecutorConfig,
    backend: Box<dyn TextGenerationBackend + Send + Sync>,
}

impl ConstantArrivalRateExecutor {
    pub fn new(
        backend: Box<dyn TextGenerationBackend + Send + Sync>,
        max_vus: u64,
        duration: Duration,
        rate: f64,
    ) -> ConstantArrivalRateExecutor {
        Self {
            backend,
            config: ExecutorConfig {
                max_vus,
                duration,
                rate: Some(rate),
            },
        }
    }
}

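// The spawning loop below accrues fractional VUs once per tick and spawns the
// integer part, carrying the remainder. For example, at `rate = 50.0` and
// `tick_ms = 10`, the queue grows by 50 * 10 / 1000 = 0.5 VU per tick, so one
// VU is spawned every second tick (every 20 ms), averaging 50 VUs per second.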
#[async_trait]
impl Executor for ConstantArrivalRateExecutor {
    async fn run(
        &self,
        requests: Arc<Mutex<dyn TextRequestGenerator + Send>>,
        responses_tx: UnboundedSender<TextGenerationAggregatedResponse>,
        stop_sender: broadcast::Sender<()>,
    ) {
        let start = std::time::Instant::now();
        let active_vus = Arc::new(AtomicI64::new(0));
        // channel used by VUs to signal that they have finished
        let (end_tx, mut end_rx): (Sender<bool>, Receiver<bool>) =
            tokio::sync::mpsc::channel(self.config.max_vus as usize);
        let rate = self.config.rate.expect("checked in new()");
        // spawn new VUs every `tick_ms` to reach the expected `rate` per second,
        // until the duration is reached
        let tick_ms = 10;
        let mut interval = tokio::time::interval(Duration::from_millis(tick_ms));
        let backend = self.backend.clone();
        let duration = self.config.duration;
        let max_vus = self.config.max_vus;
        let active_vus_thread = active_vus.clone();
        let mut stop_receiver_signal = stop_sender.subscribe();
        let vu_thread = tokio::spawn(async move {
            tokio::select! {
                _ = stop_receiver_signal.recv() => {},
                _ = async {
                    // fractional number of VUs owed, topped up by `rate` VUs per second
                    let mut spawn_queue = 0.;
                    while start.elapsed() < duration {
                        spawn_queue += rate * (tick_ms as f64) / 1000.0;
                        // delay spawning if we can't spawn a full VU yet
                        if spawn_queue < 1.0 {
                            interval.tick().await;
                            continue;
                        }
                        // spawn whole VUs, carry the fractional part over to the next iteration
                        let to_spawn = spawn_queue.floor() as u64;
                        spawn_queue -= to_spawn as f64;
                        for _ in 0..to_spawn {
                            if active_vus_thread.load(std::sync::atomic::Ordering::SeqCst) < max_vus as i64 {
                                let mut requests_guard = requests.lock().await;
                                let request = Arc::from(requests_guard.generate_request());
                                // release the lock before awaiting the spawn
                                drop(requests_guard);
                                start_vu(
                                    backend.clone(),
                                    request,
                                    responses_tx.clone(),
                                    end_tx.clone(),
                                    stop_sender.clone(),
                                    None,
                                )
                                .await;
                                active_vus_thread.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                            } else {
                                warn!("Max VUs reached, skipping request");
                                break;
                            }
                        }
                        interval.tick().await;
                    }
                    // signal that the VU work is done
                    info!("Duration reached, waiting for all VUs to finish...");
                    let _ = responses_tx.send(TextGenerationAggregatedResponse::new_as_ended());
                } => {}
            }
        });
        while end_rx.recv().await.is_some() {
            active_vus.fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
            // stop once the duration has elapsed and all VUs have finished
            if start.elapsed() > self.config.duration
                && active_vus.load(std::sync::atomic::Ordering::SeqCst) == 0
            {
                break;
            }
        }
        // wait for the VU-spawning task to finish
        vu_thread.await.unwrap();
    }
}