@@ -150,6 +150,7 @@ void fuse_uring_destruct(struct fuse_conn *fc)
 
 	for (qid = 0; qid < ring->nr_queues; qid++) {
 		struct fuse_ring_queue *queue = ring->queues[qid];
+		struct fuse_ring_ent *ent, *next;
 
 		if (!queue)
 			continue;
@@ -159,6 +160,12 @@ void fuse_uring_destruct(struct fuse_conn *fc)
 		WARN_ON(!list_empty(&queue->ent_commit_queue));
 		WARN_ON(!list_empty(&queue->ent_in_userspace));
 
+		list_for_each_entry_safe(ent, next, &queue->ent_released,
+					 list) {
+			list_del_init(&ent->list);
+			kfree(ent);
+		}
+
 		kfree(queue->fpq.processing);
 		kfree(queue);
 		ring->queues[qid] = NULL;
@@ -242,6 +249,7 @@ static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
 	INIT_LIST_HEAD(&queue->ent_in_userspace);
 	INIT_LIST_HEAD(&queue->fuse_req_queue);
 	INIT_LIST_HEAD(&queue->fuse_req_bg_queue);
+	INIT_LIST_HEAD(&queue->ent_released);
 
 	queue->fpq.processing = pq;
 	fuse_pqueue_init(&queue->fpq);
@@ -289,16 +297,22 @@ static void fuse_uring_entry_teardown(struct fuse_ring_ent *ent)
 		/* remove entry from queue->fpq->processing */
 		list_del_init(&req->list);
 	}
+
+	/*
+	 * The entry must not be freed immediately, due to direct pointer
+	 * access of entries through IO_URING_F_CANCEL - there is a risk of a
+	 * race with daemon termination, which triggers IO_URING_F_CANCEL and
+	 * accesses entries without checking the list state first.
+	 */
+	list_move(&ent->list, &queue->ent_released);
+	ent->state = FRRS_RELEASED;
 	spin_unlock(&queue->lock);
 
 	if (cmd)
 		io_uring_cmd_done(cmd, -ENOTCONN, 0, IO_URING_F_UNLOCKED);
 
 	if (req)
 		fuse_uring_stop_fuse_req_end(req);
-
-	list_del_init(&ent->list);
-	kfree(ent);
 }
 
 static void fuse_uring_stop_list_entries(struct list_head *head,
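To make the race described in the comment above concrete: fuse_uring_entry_teardown() no longer frees the entry; it parks it on queue->ent_released in state FRRS_RELEASED, and the memory is only reclaimed later in fuse_uring_destruct(). A late IO_URING_F_CANCEL callback that still holds a raw pointer to the entry therefore dereferences valid memory and can check the state before acting. Below is a minimal userspace sketch of that deferred-free pattern; the types, names, and singly linked list are simplified stand-ins, not the kernel structures.

/*
 * Hypothetical userspace model of the deferred-free pattern: teardown
 * parks entries on a "released" list instead of freeing them, so a late
 * cancel callback holding a raw pointer never touches freed memory.
 */
#include <stdlib.h>

enum ent_state { ENT_AVAILABLE, ENT_USERSPACE, ENT_TEARDOWN, ENT_RELEASED };

struct ent {
	enum ent_state state;
	struct ent *next;	/* simplified stand-in for a list_head */
};

struct queue {
	struct ent *released;	/* stands in for queue->ent_released */
};

/* teardown: do not free, move the entry to the released list */
static void entry_teardown(struct queue *q, struct ent *e)
{
	e->state = ENT_RELEASED;
	e->next = q->released;
	q->released = e;
}

/* late cancel callback: the raw pointer is still valid, state says "skip" */
static void cancel(struct ent *e)
{
	if (e->state == ENT_AVAILABLE)
		e->state = ENT_USERSPACE;	/* would complete the cmd here */
}

/* destructor: the only place released entries are actually freed */
static void destruct(struct queue *q)
{
	while (q->released) {
		struct ent *e = q->released;

		q->released = e->next;
		free(e);
	}
}

int main(void)
{
	struct queue q = { 0 };
	struct ent *e = calloc(1, sizeof(*e));

	entry_teardown(&q, e);
	cancel(e);	/* no-op: entry already released */
	destruct(&q);	/* memory reclaimed here, not in teardown */
	return 0;
}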
@@ -318,6 +332,7 @@ static void fuse_uring_stop_list_entries(struct list_head *head,
 			continue;
 		}
 
+		ent->state = FRRS_TEARDOWN;
 		list_move(&ent->list, &to_teardown);
 	}
 	spin_unlock(&queue->lock);
@@ -432,6 +447,46 @@ void fuse_uring_stop_queues(struct fuse_ring *ring)
 	}
 }
 
+/*
+ * Handle IO_URING_F_CANCEL, which typically comes on daemon termination.
+ *
+ * Releasing the last entry should trigger fuse_dev_release() if
+ * the daemon was terminated.
+ */
+static void fuse_uring_cancel(struct io_uring_cmd *cmd,
+			      unsigned int issue_flags)
+{
+	struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd);
+	struct fuse_ring_queue *queue;
+	bool need_cmd_done = false;
+
+	/*
+	 * ent is accessed through a direct pointer - it must not be
+	 * destructed as long as IO_URING_F_CANCEL might still come up.
+	 */
+	queue = ent->queue;
+	spin_lock(&queue->lock);
+	if (ent->state == FRRS_AVAILABLE) {
+		ent->state = FRRS_USERSPACE;
+		list_move(&ent->list, &queue->ent_in_userspace);
+		need_cmd_done = true;
+		ent->cmd = NULL;
+	}
+	spin_unlock(&queue->lock);
+
+	if (need_cmd_done) {
+		/* no queue lock to avoid lock order issues */
+		io_uring_cmd_done(cmd, -ENOTCONN, 0, issue_flags);
+	}
+}
+
+static void fuse_uring_prepare_cancel(struct io_uring_cmd *cmd, int issue_flags,
+				      struct fuse_ring_ent *ring_ent)
+{
+	uring_cmd_set_ring_ent(cmd, ring_ent);
+	io_uring_cmd_mark_cancelable(cmd, issue_flags);
+}
+
 /*
  * Checks for errors and stores it into the request
  */
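fuse_uring_cancel() above follows a "decide under the lock, complete outside the lock" pattern: whether the entry is still FRRS_AVAILABLE is checked under queue->lock, but io_uring_cmd_done() is only called after the lock is dropped (per the in-code comment, to avoid lock-order issues). Here is a minimal pthread-based sketch of that pattern, with hypothetical names and a plain int standing in for the entry state:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int entry_state;	/* 0 = available, 1 = handed to userspace */

/* stand-in for io_uring_cmd_done(); may take other locks internally */
static void complete_command(void)
{
	printf("completed with -ENOTCONN\n");
}

static void cancel_entry(void)
{
	bool need_done = false;

	/* the decision is made under the lock ... */
	pthread_mutex_lock(&queue_lock);
	if (entry_state == 0) {
		entry_state = 1;
		need_done = true;
	}
	pthread_mutex_unlock(&queue_lock);

	/* ... the completion call happens outside of it */
	if (need_done)
		complete_command();
}

int main(void)
{
	cancel_entry();	/* completes the command once */
	cancel_entry();	/* second invocation is a no-op */
	return 0;
}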
@@ -839,6 +894,7 @@ static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags,
 	spin_unlock(&queue->lock);
 
 	/* without the queue lock, as other locks are taken */
+	fuse_uring_prepare_cancel(cmd, issue_flags, ent);
 	fuse_uring_commit(ent, req, issue_flags);
 
 	/*
@@ -888,6 +944,8 @@ static void fuse_uring_do_register(struct fuse_ring_ent *ent,
 	struct fuse_conn *fc = ring->fc;
 	struct fuse_iqueue *fiq = &fc->iq;
 
+	fuse_uring_prepare_cancel(cmd, issue_flags, ent);
+
 	spin_lock(&queue->lock);
 	ent->cmd = cmd;
 	fuse_uring_ent_avail(ent, queue);
@@ -1038,6 +1096,11 @@ int __maybe_unused fuse_uring_cmd(struct io_uring_cmd *cmd,
 		return -EOPNOTSUPP;
 	}
 
+	if (unlikely(issue_flags & IO_URING_F_CANCEL)) {
+		fuse_uring_cancel(cmd, issue_flags);
+		return 0;
+	}
+
 	/* This extra SQE size holds struct fuse_uring_cmd_req */
 	if (!(issue_flags & IO_URING_F_SQE128))
 		return -EINVAL;
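Taken together, the hunks add a small per-entry lifecycle around cancellation: fuse_uring_do_register() and fuse_uring_commit_fetch() mark each command cancelable via fuse_uring_prepare_cancel(), fuse_uring_cmd() dispatches IO_URING_F_CANCEL to fuse_uring_cancel() before the SQE128 size check, and teardown parks entries on ent_released until fuse_uring_destruct() frees them. The sketch below only summarizes the states as this diff uses them; the real enum is defined elsewhere in the series, so the names and comments here are illustrative, not the kernel definition.

/*
 * Illustrative summary of the entry states referenced in this diff; the
 * real state enum is defined elsewhere in the series.
 */
enum sketch_ring_ent_state {
	SKETCH_FRRS_AVAILABLE,	/* SQE registered, entry waiting for a request */
	SKETCH_FRRS_USERSPACE,	/* cmd completed (e.g. by fuse_uring_cancel()) */
	SKETCH_FRRS_TEARDOWN,	/* picked up by fuse_uring_stop_list_entries() */
	SKETCH_FRRS_RELEASED,	/* parked on ent_released, freed in destruct */
};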