@@ -211,20 +211,11 @@ struct io_ring_ctx {
 		unsigned int		drain_disabled: 1;
 		unsigned int		compat: 1;
 
-		enum task_work_notify_mode	notify_method;
+		struct task_struct	*submitter_task;
+		struct io_rings		*rings;
+		struct percpu_ref	refs;
 
-		/*
-		 * If IORING_SETUP_NO_MMAP is used, then the below holds
-		 * the gup'ed pages for the two rings, and the sqes.
-		 */
-		unsigned short		n_ring_pages;
-		unsigned short		n_sqe_pages;
-		struct page		**ring_pages;
-		struct page		**sqe_pages;
-
-		struct io_rings		*rings;
-		struct task_struct	*submitter_task;
-		struct percpu_ref	refs;
+		enum task_work_notify_mode	notify_method;
 	} ____cacheline_aligned_in_smp;
 
 	/* submission data */
@@ -262,10 +253,8 @@ struct io_ring_ctx {
 
 		struct io_buffer_list	*io_bl;
 		struct xarray		io_bl_xa;
-		struct list_head	io_buffers_cache;
 
 		struct io_hash_table	cancel_table_locked;
-		struct list_head	cq_overflow_list;
 		struct io_alloc_cache	apoll_cache;
 		struct io_alloc_cache	netmsg_cache;
 	} ____cacheline_aligned_in_smp;
@@ -298,11 +287,8 @@ struct io_ring_ctx {
 		 * manipulate the list, hence no extra locking is needed there.
 		 */
 		struct io_wq_work_list	iopoll_list;
-		struct io_hash_table	cancel_table;
 
 		struct llist_head	work_llist;
-
-		struct list_head	io_buffers_comp;
 	} ____cacheline_aligned_in_smp;
 
 	/* timeouts */
@@ -318,6 +304,10 @@ struct io_ring_ctx {
 	struct io_wq_work_list	locked_free_list;
 	unsigned int		locked_free_nr;
 
+	struct list_head	io_buffers_comp;
+	struct list_head	cq_overflow_list;
+	struct io_hash_table	cancel_table;
+
 	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
 	struct io_sq_data	*sq_data;	/* if using sq thread polling */
 
@@ -332,6 +322,8 @@ struct io_ring_ctx {
 	struct xarray		personalities;
 	u32			pers_next;
 
+	struct list_head	io_buffers_cache;
+
 	/* Keep this last, we don't need it for the fast path */
 	struct wait_queue_head	poll_wq;
 	struct io_restriction	restrictions;
@@ -375,6 +367,15 @@ struct io_ring_ctx {
 	unsigned		sq_thread_idle;
 	/* protected by ->completion_lock */
 	unsigned		evfd_last_cq_tail;
+
+	/*
+	 * If IORING_SETUP_NO_MMAP is used, then the below holds
+	 * the gup'ed pages for the two rings, and the sqes.
+	 */
+	unsigned short		n_ring_pages;
+	unsigned short		n_sqe_pages;
+	struct page		**ring_pages;
+	struct page		**sqe_pages;
 };
 
 struct io_tw_state {
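The net effect of the reshuffle is that fields touched on every submission stay packed into the cacheline-aligned groups, while setup-only bookkeeping, such as the IORING_SETUP_NO_MMAP page arrays, moves to the tail of io_ring_ctx. Below is a minimal userspace sketch of that pattern, assuming a 64-byte cacheline; example_ctx, CACHELINE, and the field subset are hypothetical stand-ins, and the kernel expresses the alignment with ____cacheline_aligned_in_smp from <linux/cache.h> rather than a raw attribute.

	#include <stdio.h>
	#include <stddef.h>

	#define CACHELINE 64	/* assumed; the kernel uses SMP_CACHE_BYTES */

	struct example_ctx {
		/* hot data: read on every submission, starts on its own cacheline */
		struct {
			unsigned int	flags;
			void		*rings;
			void		*submitter_task;
		} hot __attribute__((aligned(CACHELINE)));

		/* cold data: only touched at ring setup/teardown, parked at the end */
		unsigned short	n_ring_pages;
		unsigned short	n_sqe_pages;
		void		**ring_pages;
		void		**sqe_pages;
	};

	int main(void)
	{
		/* the aligned group begins a fresh cacheline; cold fields follow it */
		printf("hot group offset:  %zu\n", offsetof(struct example_ctx, hot));
		printf("cold field offset: %zu\n", offsetof(struct example_ctx, n_ring_pages));
		printf("struct size:       %zu\n", sizeof(struct example_ctx));
		return 0;
	}

Note that the alignment attribute only guarantees where a group starts, so the benefit comes from which fields are packed together, not from isolating every field on its own line.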