
Commit 857b026

Bernd Schubert (bsbernd) authored and Miklos Szeredi committed
fuse: Allow to queue bg requests through io-uring
This prepares queueing and sending background requests through io-uring.

Signed-off-by: Bernd Schubert <[email protected]>
Reviewed-by: Pavel Begunkov <[email protected]> # io_uring
Reviewed-by: Luis Henriques <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
1 parent c2c9af9 commit 857b026
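
For orientation, here is a plain-C model (not kernel code) of the dispatch decision this patch adds to fuse_request_queue_background() in fs/fuse/dev.c below: when FUSE is built with CONFIG_FUSE_IO_URING and the ring reports ready, background requests bypass the classic fc->bg_queue path entirely. The enum and helper names are illustrative only, not part of the patch.

#include <stdbool.h>

enum bg_path { BG_PATH_URING, BG_PATH_LEGACY };

/* Illustrative model of the branch added in fs/fuse/dev.c below. */
static enum bg_path pick_bg_path(bool io_uring_compiled_in, bool uring_ready)
{
	/*
	 * fuse_uring_ready(fc) is only consulted when CONFIG_FUSE_IO_URING
	 * is set; the !CONFIG stub added in dev_uring_i.h always returns
	 * false, so such builds take the legacy path unconditionally.
	 */
	if (io_uring_compiled_in && uring_ready)
		return BG_PATH_URING;	/* fuse_request_queue_background_uring() */
	return BG_PATH_LEGACY;		/* classic bg accounting under fc->bg_lock */
}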

File tree

3 files changed: +136 −1 lines changed

fs/fuse/dev.c

Lines changed: 25 additions & 1 deletion

@@ -568,7 +568,25 @@ ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
 	return ret;
 }
 
-static bool fuse_request_queue_background(struct fuse_req *req)
+#ifdef CONFIG_FUSE_IO_URING
+static bool fuse_request_queue_background_uring(struct fuse_conn *fc,
+						struct fuse_req *req)
+{
+	struct fuse_iqueue *fiq = &fc->iq;
+
+	req->in.h.unique = fuse_get_unique(fiq);
+	req->in.h.len = sizeof(struct fuse_in_header) +
+			fuse_len_args(req->args->in_numargs,
+				      (struct fuse_arg *) req->args->in_args);
+
+	return fuse_uring_queue_bq_req(req);
+}
+#endif
+
+/*
+ * @return true if queued
+ */
+static int fuse_request_queue_background(struct fuse_req *req)
 {
 	struct fuse_mount *fm = req->fm;
 	struct fuse_conn *fc = fm->fc;
@@ -580,6 +598,12 @@ static bool fuse_request_queue_background(struct fuse_req *req)
 		atomic_inc(&fc->num_waiting);
 	}
 	__set_bit(FR_ISREPLY, &req->flags);
+
+#ifdef CONFIG_FUSE_IO_URING
+	if (fuse_uring_ready(fc))
+		return fuse_request_queue_background_uring(fc, req);
+#endif
+
 	spin_lock(&fc->bg_lock);
 	if (likely(fc->connected)) {
 		fc->num_background++;
fs/fuse/dev_uring.c

Lines changed: 99 additions & 0 deletions

@@ -47,10 +47,53 @@ static struct fuse_ring_ent *uring_cmd_to_ring_ent(struct io_uring_cmd *cmd)
 	return pdu->ent;
 }
 
+static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
+{
+	struct fuse_ring *ring = queue->ring;
+	struct fuse_conn *fc = ring->fc;
+
+	lockdep_assert_held(&queue->lock);
+	lockdep_assert_held(&fc->bg_lock);
+
+	/*
+	 * Allow one bg request per queue, ignoring global fc limits.
+	 * This prevents a single queue from consuming all resources and
+	 * eliminates the need for remote queue wake-ups when global
+	 * limits are met but this queue has no more waiting requests.
+	 */
+	while ((fc->active_background < fc->max_background ||
+		!queue->active_background) &&
+	       (!list_empty(&queue->fuse_req_bg_queue))) {
+		struct fuse_req *req;
+
+		req = list_first_entry(&queue->fuse_req_bg_queue,
+				       struct fuse_req, list);
+		fc->active_background++;
+		queue->active_background++;
+
+		list_move_tail(&req->list, &queue->fuse_req_queue);
+	}
+}
+
 static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
 			       int error)
 {
+	struct fuse_ring_queue *queue = ent->queue;
+	struct fuse_ring *ring = queue->ring;
+	struct fuse_conn *fc = ring->fc;
+
+	lockdep_assert_not_held(&queue->lock);
+	spin_lock(&queue->lock);
 	ent->fuse_req = NULL;
+	if (test_bit(FR_BACKGROUND, &req->flags)) {
+		queue->active_background--;
+		spin_lock(&fc->bg_lock);
+		fuse_uring_flush_bg(queue);
+		spin_unlock(&fc->bg_lock);
+	}
+
+	spin_unlock(&queue->lock);
+
 	if (error)
 		req->out.h.error = error;
@@ -78,13 +121,21 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
 {
 	int qid;
 	struct fuse_ring_queue *queue;
+	struct fuse_conn *fc = ring->fc;
 
 	for (qid = 0; qid < ring->nr_queues; qid++) {
 		queue = READ_ONCE(ring->queues[qid]);
 		if (!queue)
 			continue;
 
 		queue->stopped = true;
+
+		WARN_ON_ONCE(ring->fc->max_background != UINT_MAX);
+		spin_lock(&queue->lock);
+		spin_lock(&fc->bg_lock);
+		fuse_uring_flush_bg(queue);
+		spin_unlock(&fc->bg_lock);
+		spin_unlock(&queue->lock);
 		fuse_uring_abort_end_queue_requests(queue);
 	}
 }
@@ -190,6 +241,7 @@ static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
 	INIT_LIST_HEAD(&queue->ent_w_req_queue);
 	INIT_LIST_HEAD(&queue->ent_in_userspace);
 	INIT_LIST_HEAD(&queue->fuse_req_queue);
+	INIT_LIST_HEAD(&queue->fuse_req_bg_queue);
 
 	queue->fpq.processing = pq;
 	fuse_pqueue_init(&queue->fpq);
@@ -1141,6 +1193,53 @@ void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
 	fuse_request_end(req);
 }
 
+bool fuse_uring_queue_bq_req(struct fuse_req *req)
+{
+	struct fuse_conn *fc = req->fm->fc;
+	struct fuse_ring *ring = fc->ring;
+	struct fuse_ring_queue *queue;
+	struct fuse_ring_ent *ent = NULL;
+
+	queue = fuse_uring_task_to_queue(ring);
+	if (!queue)
+		return false;
+
+	spin_lock(&queue->lock);
+	if (unlikely(queue->stopped)) {
+		spin_unlock(&queue->lock);
+		return false;
+	}
+
+	list_add_tail(&req->list, &queue->fuse_req_bg_queue);
+
+	ent = list_first_entry_or_null(&queue->ent_avail_queue,
+				       struct fuse_ring_ent, list);
+	spin_lock(&fc->bg_lock);
+	fc->num_background++;
+	if (fc->num_background == fc->max_background)
+		fc->blocked = 1;
+	fuse_uring_flush_bg(queue);
+	spin_unlock(&fc->bg_lock);
+
+	/*
+	 * Due to bg_queue flush limits there might be other bg requests
+	 * in the queue that need to be handled first. Or no further req
+	 * might be available.
+	 */
+	req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req,
+				       list);
+	if (ent && req) {
+		fuse_uring_add_req_to_ring_ent(ent, req);
+		spin_unlock(&queue->lock);
+
+		fuse_uring_dispatch_ent(ent);
+	} else {
+		spin_unlock(&queue->lock);
+	}
+
+	return true;
+}
+
 static const struct fuse_iqueue_ops fuse_io_uring_ops = {
 	/* should be send over io-uring as enhancement */
 	.send_forget = fuse_dev_queue_forget,
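
The core fairness rule lives in fuse_uring_flush_bg() above: a queue may always activate one background request regardless of the connection-wide limit, and beyond that only while fc->active_background stays below fc->max_background. A standalone model of that admission check, with hypothetical parameter names standing in for the locked kernel state:

#include <stdbool.h>

/*
 * Model of the loop condition in fuse_uring_flush_bg(): admit the next
 * waiting bg request if this queue has none active yet (per-queue
 * forward-progress guarantee, avoiding remote queue wake-ups), or if
 * the global active count is still under max_background.
 */
static bool may_activate_bg(unsigned int fc_active, unsigned int fc_max,
			    unsigned int queue_active, bool queue_has_waiting)
{
	if (!queue_has_waiting)
		return false;
	return queue_active == 0 || fc_active < fc_max;
}

This also explains the comment in fuse_uring_queue_bq_req(): after a flush, the request just added may not be the one dispatched first, since earlier entries in fuse_req_bg_queue are moved to fuse_req_queue ahead of it, or none may have been admitted at all.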

fs/fuse/dev_uring_i.h

Lines changed: 12 additions & 0 deletions

@@ -82,8 +82,13 @@ struct fuse_ring_queue {
 	/* fuse requests waiting for an entry slot */
 	struct list_head fuse_req_queue;
 
+	/* background fuse requests */
+	struct list_head fuse_req_bg_queue;
+
 	struct fuse_pqueue fpq;
 
+	unsigned int active_background;
+
 	bool stopped;
 };
@@ -127,6 +132,7 @@ void fuse_uring_stop_queues(struct fuse_ring *ring);
 void fuse_uring_abort_end_requests(struct fuse_ring *ring);
 int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
 void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
+bool fuse_uring_queue_bq_req(struct fuse_req *req);
 
 static inline void fuse_uring_abort(struct fuse_conn *fc)
 {
@@ -179,6 +185,12 @@ static inline void fuse_uring_abort(struct fuse_conn *fc)
 static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
 {
 }
+
+static inline bool fuse_uring_ready(struct fuse_conn *fc)
+{
+	return false;
+}
+
 #endif /* CONFIG_FUSE_IO_URING */
 
 #endif /* _FS_FUSE_DEV_URING_I_H */
