@@ -47,10 +47,53 @@ static struct fuse_ring_ent *uring_cmd_to_ring_ent(struct io_uring_cmd *cmd)
 	return pdu->ent;
 }
 
+static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
+{
+	struct fuse_ring *ring = queue->ring;
+	struct fuse_conn *fc = ring->fc;
+
+	lockdep_assert_held(&queue->lock);
+	lockdep_assert_held(&fc->bg_lock);
+
+	/*
+	 * Allow one bg request per queue, ignoring global fc limits.
+	 * This prevents a single queue from consuming all resources and
+	 * eliminates the need for remote queue wake-ups when global
+	 * limits are met but this queue has no more waiting requests.
+	 */
+	while ((fc->active_background < fc->max_background ||
+		!queue->active_background) &&
+	       (!list_empty(&queue->fuse_req_bg_queue))) {
+		struct fuse_req *req;
+
+		req = list_first_entry(&queue->fuse_req_bg_queue,
+				       struct fuse_req, list);
+		fc->active_background++;
+		queue->active_background++;
+
+		list_move_tail(&req->list, &queue->fuse_req_queue);
+	}
+}
+
 static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
 			       int error)
 {
+	struct fuse_ring_queue *queue = ent->queue;
+	struct fuse_ring *ring = queue->ring;
+	struct fuse_conn *fc = ring->fc;
+
+	lockdep_assert_not_held(&queue->lock);
+	spin_lock(&queue->lock);
 	ent->fuse_req = NULL;
+	if (test_bit(FR_BACKGROUND, &req->flags)) {
+		queue->active_background--;
+		spin_lock(&fc->bg_lock);
+		fuse_uring_flush_bg(queue);
+		spin_unlock(&fc->bg_lock);
+	}
+
+	spin_unlock(&queue->lock);
+
 	if (error)
 		req->out.h.error = error;
 
@@ -78,13 +121,21 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
 {
 	int qid;
 	struct fuse_ring_queue *queue;
+	struct fuse_conn *fc = ring->fc;
 
 	for (qid = 0; qid < ring->nr_queues; qid++) {
 		queue = READ_ONCE(ring->queues[qid]);
 		if (!queue)
 			continue;
 
 		queue->stopped = true;
+
+		WARN_ON_ONCE(ring->fc->max_background != UINT_MAX);
+		spin_lock(&queue->lock);
+		spin_lock(&fc->bg_lock);
+		fuse_uring_flush_bg(queue);
+		spin_unlock(&fc->bg_lock);
+		spin_unlock(&queue->lock);
 		fuse_uring_abort_end_queue_requests(queue);
 	}
 }
@@ -190,6 +241,7 @@ static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
 	INIT_LIST_HEAD(&queue->ent_w_req_queue);
 	INIT_LIST_HEAD(&queue->ent_in_userspace);
 	INIT_LIST_HEAD(&queue->fuse_req_queue);
+	INIT_LIST_HEAD(&queue->fuse_req_bg_queue);
 
 	queue->fpq.processing = pq;
 	fuse_pqueue_init(&queue->fpq);
@@ -1141,6 +1193,53 @@ void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
 	fuse_request_end(req);
 }
 
+bool fuse_uring_queue_bq_req(struct fuse_req *req)
+{
+	struct fuse_conn *fc = req->fm->fc;
+	struct fuse_ring *ring = fc->ring;
+	struct fuse_ring_queue *queue;
+	struct fuse_ring_ent *ent = NULL;
+
+	queue = fuse_uring_task_to_queue(ring);
+	if (!queue)
+		return false;
+
+	spin_lock(&queue->lock);
+	if (unlikely(queue->stopped)) {
+		spin_unlock(&queue->lock);
+		return false;
+	}
+
+	list_add_tail(&req->list, &queue->fuse_req_bg_queue);
+
+	ent = list_first_entry_or_null(&queue->ent_avail_queue,
+				       struct fuse_ring_ent, list);
+	spin_lock(&fc->bg_lock);
+	fc->num_background++;
+	if (fc->num_background == fc->max_background)
+		fc->blocked = 1;
+	fuse_uring_flush_bg(queue);
+	spin_unlock(&fc->bg_lock);
+
+	/*
+	 * Due to bg_queue flush limits there might be other bg requests
+	 * in the queue that need to be handled first. Or no further req
+	 * might be available.
+	 */
+	req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req,
+				       list);
+	if (ent && req) {
+		fuse_uring_add_req_to_ring_ent(ent, req);
+		spin_unlock(&queue->lock);
+
+		fuse_uring_dispatch_ent(ent);
+	} else {
+		spin_unlock(&queue->lock);
+	}
+
+	return true;
+}
+
 static const struct fuse_iqueue_ops fuse_io_uring_ops = {
 	/* should be send over io-uring as enhancement */
 	.send_forget = fuse_dev_queue_forget,
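
For orientation only, and not part of this commit: a minimal caller sketch of how the background path in dev.c might hand a request to fuse_uring_queue_bq_req() and fall back to the classic /dev/fuse background queue when it returns false (no queue for this task, or the queue is stopped). The fuse_uring_ready() check and the fuse_request_queue_background_classic() helper are assumptions used for illustration; the function name mirrors the existing dev.c entry point, but this body is a sketch, not the actual implementation.

/*
 * Hypothetical sketch: dispatch a background request over io-uring when a
 * ring queue is usable, otherwise use the conventional fc->bg_queue path.
 */
static bool fuse_request_queue_background(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	/* Assumed helper: true once userspace has registered ring queues. */
	if (fuse_uring_ready(fc) && fuse_uring_queue_bq_req(req))
		return true;

	/* Hypothetical name for the pre-io-uring bg_queue logic. */
	return fuse_request_queue_background_classic(fc, req);
}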