@@ -13,7 +13,6 @@
 #include "filetable.h"
 #include "msg_ring.h"
 
-
 /* All valid masks for MSG_RING */
 #define IORING_MSG_RING_MASK		(IORING_MSG_RING_CQE_SKIP | \
 					 IORING_MSG_RING_FLAGS_PASS)
@@ -71,54 +70,43 @@ static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
 	return target_ctx->task_complete;
 }
 
-static int io_msg_exec_remote(struct io_kiocb *req, task_work_func_t func)
+static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
 {
-	struct io_ring_ctx *ctx = req->file->private_data;
-	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
-	struct task_struct *task = READ_ONCE(ctx->submitter_task);
-
-	if (unlikely(!task))
-		return -EOWNERDEAD;
+	struct io_ring_ctx *ctx = req->ctx;
 
-	init_task_work(&msg->tw, func);
-	if (task_work_add(task, &msg->tw, TWA_SIGNAL))
-		return -EOWNERDEAD;
+	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
+	kmem_cache_free(req_cachep, req);
+	percpu_ref_put(&ctx->refs);
+}
 
-	return IOU_ISSUE_SKIP_COMPLETE;
+static void io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
+			       int res, u32 cflags, u64 user_data)
+{
+	req->cqe.user_data = user_data;
+	io_req_set_res(req, res, cflags);
+	percpu_ref_get(&ctx->refs);
+	req->ctx = ctx;
+	req->task = READ_ONCE(ctx->submitter_task);
+	req->io_task_work.func = io_msg_tw_complete;
+	io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
 }
 
-static void io_msg_tw_complete(struct callback_head *head)
+static int io_msg_data_remote(struct io_kiocb *req)
 {
-	struct io_msg *msg = container_of(head, struct io_msg, tw);
-	struct io_kiocb *req = cmd_to_io_kiocb(msg);
 	struct io_ring_ctx *target_ctx = req->file->private_data;
-	int ret = 0;
-
-	if (current->flags & PF_EXITING) {
-		ret = -EOWNERDEAD;
-	} else {
-		u32 flags = 0;
-
-		if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
-			flags = msg->cqe_flags;
-
-		/*
-		 * If the target ring is using IOPOLL mode, then we need to be
-		 * holding the uring_lock for posting completions. Other ring
-		 * types rely on the regular completion locking, which is
-		 * handled while posting.
-		 */
-		if (target_ctx->flags & IORING_SETUP_IOPOLL)
-			mutex_lock(&target_ctx->uring_lock);
-		if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
-			ret = -EOVERFLOW;
-		if (target_ctx->flags & IORING_SETUP_IOPOLL)
-			mutex_unlock(&target_ctx->uring_lock);
-	}
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+	struct io_kiocb *target;
+	u32 flags = 0;
 
-	if (ret < 0)
-		req_set_fail(req);
-	io_req_queue_tw_complete(req, ret);
+	target = kmem_cache_alloc(req_cachep, GFP_KERNEL);
+	if (unlikely(!target))
+		return -ENOMEM;
+
+	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
+		flags = msg->cqe_flags;
+
+	io_msg_remote_post(target_ctx, target, msg->len, flags, msg->user_data);
+	return 0;
 }
 
 static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
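
The rewritten data path above drops the old task_work bounce (init_task_work() plus task_work_add() on the target's submitter task). Instead, io_msg_data_remote() allocates a bare io_kiocb from req_cachep, io_msg_remote_post() stamps it with the message's user_data/res/cflags, retargets req->ctx, takes a percpu ref on the target ctx, and queues it via io_req_task_work_add_remote(); io_msg_tw_complete() then posts the CQE with io_add_aux_cqe() and frees the carrier request. For orientation, here is a minimal userspace sketch of the operation this serves; it is a hypothetical example, not part of this commit, and assumes liburing's io_uring_prep_msg_ring() helper. Note the remote path is only taken when io_msg_need_remote() is true, i.e. the target ring completes via its submitter task (DEFER_TASKRUN-style); a plain target ring completes inline.

/*
 * Hypothetical sketch, assuming liburing >= 2.2: post a CQE from one
 * ring into another with IORING_OP_MSG_RING. Error handling trimmed.
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring src, dst;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &src, 0) ||
	    io_uring_queue_init(8, &dst, 0))
		return 1;

	/* ask the kernel to post a CQE with res=42, user_data=0xcafe on dst */
	sqe = io_uring_get_sqe(&src);
	io_uring_prep_msg_ring(sqe, dst.ring_fd, 42, 0xcafe, 0);
	io_uring_submit_and_wait(&src, 1);	/* src gets its own CQE too */

	/* the message arrives as an ordinary CQE on the target ring */
	if (!io_uring_wait_cqe(&dst, &cqe)) {
		printf("dst CQE: res=%d user_data=0x%llx\n",
		       cqe->res, (unsigned long long) cqe->user_data);
		io_uring_cqe_seen(&dst, cqe);
	}

	io_uring_queue_exit(&src);
	io_uring_queue_exit(&dst);
	return 0;
}
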
@@ -136,7 +124,7 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
 		return -EBADFD;
 
 	if (io_msg_need_remote(target_ctx))
-		return io_msg_exec_remote(req, io_msg_tw_complete);
+		return io_msg_data_remote(req);
 
 	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
 		flags = msg->cqe_flags;
@@ -216,6 +204,22 @@ static void io_msg_tw_fd_complete(struct callback_head *head)
 	io_req_queue_tw_complete(req, ret);
 }
 
+static int io_msg_fd_remote(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->file->private_data;
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+	struct task_struct *task = READ_ONCE(ctx->submitter_task);
+
+	if (unlikely(!task))
+		return -EOWNERDEAD;
+
+	init_task_work(&msg->tw, io_msg_tw_fd_complete);
+	if (task_work_add(task, &msg->tw, TWA_SIGNAL))
+		return -EOWNERDEAD;
+
+	return IOU_ISSUE_SKIP_COMPLETE;
+}
+
 static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *target_ctx = req->file->private_data;
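
Unlike the data path, fd passing keeps the generic task_work mechanism: the new io_msg_fd_remote() above still does init_task_work()/task_work_add() on the target's submitter task, because installing a file into the target ring's table has to run in that task's context. A hedged userspace sketch of what this path serves follows; it is a hypothetical example assuming liburing >= 2.4 for io_uring_prep_msg_ring_fd(), with 'src_slot' a registered-file index in the source ring.

/*
 * Hypothetical sketch: pass the source ring's registered file at index
 * 'src_slot' into the target ring's file table, letting the kernel pick
 * a free slot there. On success the target ring sees a CQE with
 * user_data == 'tag' and res == the chosen slot index.
 */
#include <errno.h>
#include <liburing.h>

static int msg_send_fd(struct io_uring *src, struct io_uring *dst,
		       int src_slot, __u64 tag)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(src);

	if (!sqe)
		return -ENOMEM;
	io_uring_prep_msg_ring_fd(sqe, dst->ring_fd, src_slot,
				  IORING_FILE_INDEX_ALLOC, tag, 0);
	return io_uring_submit(src);
}
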
@@ -238,7 +242,7 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 	}
 
 	if (io_msg_need_remote(target_ctx))
-		return io_msg_exec_remote(req, io_msg_tw_fd_complete);
+		return io_msg_fd_remote(req);
 	return io_msg_install_complete(req, issue_flags);
 }
 