@@ -16,7 +16,7 @@
 #include "comp.h"

 #define rb_to_rq(rb) rb_entry_safe(rb, struct request, rb_node)
-#define rq_rb_first(root) rb_to_rq(rb_first(root))
+#define rq_rb_first_cached(root) rb_to_rq(rb_first_cached(root))

 static void ioem_error_injection(struct request* rq);

@@ -46,7 +46,7 @@ static void ioem_error_injection(struct request* rq);
  * this struct is only allocated per `request_queue`.
  */
 struct ioem_data {
-    struct rb_root root;
+    struct rb_root_cached root;
     spinlock_t lock;

     struct hrtimer timer;
@@ -112,8 +112,6 @@ struct irl {
     atomic64_t io_counter;
     atomic64_t last_expire_time;
     struct hrtimer timer;
-
-    atomic64_t affected_request_counter;
 };

 /**
@@ -193,7 +191,7 @@ static struct irl_dispatch_return irl_dispatch(struct ioem_data* data, struct re
     read_lock(&irl->lock);

     period = atomic64_read(&irl->io_period_us);
-    if (period == 0 || !ioem_priv(rq)->ioem_limit_should_affect) {
+    if (period == 0) {
         // the irl is not enabled
         ret.dispatch = 1;
         ret.time_to_send = 0;
@@ -208,9 +206,9 @@ static struct irl_dispatch_return irl_dispatch(struct ioem_data* data, struct re
         counter = atomic64_read(&irl->io_counter);
     }
     if (counter < quota) {
+        //
         ret.dispatch = 1;
         ret.time_to_send = 0;
-        atomic64_dec(&irl->affected_request_counter);
     } else {
         ret.dispatch = 0;
         ret.time_to_send = last_expire_time + period * NSEC_PER_USEC;
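For reference, the branch above reduces to a small piece of arithmetic: while the per-period counter is below the quota (or rate limiting is disabled, i.e. period == 0), the request is dispatched immediately; once the quota is exhausted, it is deferred to the boundary of the next period. A standalone sketch with hypothetical names, not code from this module:

#include <linux/time64.h>   /* NSEC_PER_USEC */
#include <linux/types.h>

/* Hypothetical helper mirroring the decision above: returns 0 when the
 * request may be dispatched now, otherwise the absolute time (in ns) at
 * which it becomes eligible. */
static u64 irl_time_to_send_sketch(u64 counter, u64 quota, u64 period_us,
                                   u64 last_expire_time_ns)
{
    if (period_us == 0 || counter < quota)
        return 0; /* rate limit disabled, or quota not yet exhausted */

    /* defer to the start of the next period */
    return last_expire_time_ns + period_us * NSEC_PER_USEC;
}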
@@ -222,36 +220,6 @@ static struct irl_dispatch_return irl_dispatch(struct ioem_data* data, struct re
     return ret;
 }

-/**
- * irl_enqueue() - optimize the time_to_send of a request which will enqueue
- * @data: The corresponding ioem_data struct
- * @rq: The request to be dispatch
- *
- * This function will read the counter inside irl. If the counter is already
- * greater than the quota and the `time_to_send` is earlier than the next
- * period, it will set the `time_to_send` of the request to the next period.
- */
-static void irl_enqueue(struct ioem_data* data, struct request* rq)
-{
-    u64 next_period, period, counter;
-    struct irl* irl = data->irl;
-
-    period = atomic64_read(&irl->io_period_us);
-    if (period == 0 || !ioem_priv(rq)->ioem_limit_should_affect) {
-        return;
-    }
-
-    counter = atomic64_fetch_add(1, &irl->affected_request_counter);
-    read_lock(&irl->lock);
-    if (atomic64_read(&irl->io_counter) > irl->io_quota) {
-        next_period = atomic64_read(&irl->last_expire_time) + atomic64_read(&irl->io_period_us) * NSEC_PER_USEC * (counter / irl->io_quota);
-        if (ioem_priv(rq)->time_to_send < next_period) {
-            ioem_priv(rq)->time_to_send = next_period;
-        };
-    }
-    read_unlock(&irl->lock);
-}
-
 static void ioem_data_sync_with_injections(struct ioem_data* data);

 /**
@@ -264,7 +232,7 @@ static void ioem_data_sync_with_injections(struct ioem_data* data);
  */
 static void ioem_erase_head(struct ioem_data *data, struct request *rq)
 {
-    rb_erase(&rq->rb_node, &data->root);
+    rb_erase_cached(&rq->rb_node, &data->root);
     RB_CLEAR_NODE(&rq->rb_node);
     INIT_LIST_HEAD(&rq->queuelist);
 }
@@ -275,7 +243,7 @@ static void ioem_erase_head(struct ioem_data *data, struct request *rq)
  */
 static struct request* ioem_peek_request(struct ioem_data *data)
 {
-    struct request* ioem_rq = rq_rb_first(&data->root);
+    struct request* ioem_rq = rq_rb_first_cached(&data->root);

     return ioem_rq;
 }
@@ -299,7 +267,7 @@ static void ioem_data_init(struct ioem_data* data, enum hrtimer_restart (*functi
     hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);

     spin_lock_init(&data->lock);
-    data->root = RB_ROOT;
+    data->root = RB_ROOT_CACHED;
     data->timer.function = function;
     data->next_expires = 0;

@@ -311,30 +279,29 @@ static void ioem_data_init(struct ioem_data* data, enum hrtimer_restart (*functi
  * @data: The `ioem_data` strucutre
  * @rq: The request
  *
- * The request will be inserted into the rb tree. Before inserting the request,
- * it will also check whether this request will be affected by the irl and
- * whether the irl has
+ * The request will be inserted into the rb tree
  */
 static void ioem_enqueue(struct ioem_data* data, struct request* rq)
 {
-    struct rb_node **p = &data->root.rb_node, *parent = NULL;
+    struct rb_node **p = &data->root.rb_root.rb_node, *parent = NULL;
+    bool leftmost = true;

-    irl_enqueue(data, rq);
-
     while (*p) {
         struct request *parent_rq;

         parent = *p;
         parent_rq = rb_entry_safe(parent, struct request, rb_node);

-        if (ioem_priv(rq)->time_to_send > ioem_priv(parent_rq)->time_to_send)
+        if (ioem_priv(rq)->time_to_send > ioem_priv(parent_rq)->time_to_send) {
             p = &parent->rb_right;
+            leftmost = false;
+        }
         else
             p = &parent->rb_left;
     }

     rb_link_node(&rq->rb_node, parent, p);
-    rb_insert_color(&rq->rb_node, &data->root);
+    rb_insert_color_cached(&rq->rb_node, &data->root, leftmost);
 }

 /**
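The conversion above follows the kernel's cached rbtree idiom: struct rb_root_cached keeps a pointer to the leftmost node, so the entry with the smallest time_to_send can be peeked in O(1), at the cost of tracking a leftmost flag during insertion. A minimal standalone sketch of that idiom, using an illustrative `item` type rather than code from ioem.c:

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
    u64 time_to_send;
    struct rb_node rb_node;
};

static struct rb_root_cached example_root = RB_ROOT_CACHED;

static void example_insert(struct item *it)
{
    struct rb_node **p = &example_root.rb_root.rb_node, *parent = NULL;
    bool leftmost = true;

    while (*p) {
        struct item *cur = rb_entry(*p, struct item, rb_node);

        parent = *p;
        if (it->time_to_send > cur->time_to_send) {
            p = &parent->rb_right;
            leftmost = false;   /* no longer the smallest key */
        } else {
            p = &parent->rb_left;
        }
    }

    rb_link_node(&it->rb_node, parent, p);
    /* keeps the cached leftmost pointer up to date */
    rb_insert_color_cached(&it->rb_node, &example_root, leftmost);
}

static struct item *example_peek(void)
{
    /* O(1) access to the node with the smallest time_to_send */
    return rb_entry_safe(rb_first_cached(&example_root), struct item, rb_node);
}

static void example_erase(struct item *it)
{
    rb_erase_cached(&it->rb_node, &example_root);
    RB_CLEAR_NODE(&it->rb_node);
}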
@@ -354,7 +321,7 @@ static struct request* ioem_dequeue(struct ioem_data *data)
     u64 now, time_to_send;
     struct request *rq = NULL;

-    if (RB_EMPTY_ROOT(&data->root)) {
+    if (RB_EMPTY_ROOT(&data->root.rb_root)) {
         return NULL;
     }

@@ -531,8 +498,6 @@ static void ioem_mq_insert_requests(struct blk_mq_hw_ctx * hctx, struct list_hea
     ioem_data_sync_with_injections(id);

     list_for_each_entry_safe(rq, next, list, queuelist) {
-        rq = list_first_entry(list, struct request, queuelist);
-
         list_del(&rq->queuelist);

         if (at_head) {
@@ -558,7 +523,7 @@ static bool ioem_mq_has_work(struct blk_mq_hw_ctx * hctx)
     struct ioem_data *id = hctx->sched_data;
     bool has_work = 0;

-    has_work = !RB_EMPTY_ROOT(&id->root);
+    has_work = !RB_EMPTY_ROOT(&id->root.rb_root);

     return has_work;
 }
@@ -988,6 +953,7 @@ int ioem_del(unsigned long id) {
         {
             list_del(&e->list);
             kref_put(&e->refcount, ioem_injection_release);
+            break;
         }
     }
