 
 #define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
 
-#define BGID_ARRAY	64
-
 /* BIDs are addressed by a 16-bit field in a CQE */
 #define MAX_BIDS_PER_BGID (1 << 16)
 
@@ -40,13 +38,9 @@ struct io_buf_free {
 	int				inuse;
 };
 
-static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
-						   struct io_buffer_list *bl,
-						   unsigned int bgid)
+static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
+							   unsigned int bgid)
 {
-	if (bl && bgid < BGID_ARRAY)
-		return &bl[bgid];
-
 	return xa_load(&ctx->io_bl_xa, bgid);
 }
 
@@ -55,7 +49,7 @@ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 {
 	lockdep_assert_held(&ctx->uring_lock);
 
-	return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
+	return __io_buffer_get_list(ctx, bgid);
 }
 
 static int io_buffer_add_list(struct io_ring_ctx *ctx,
@@ -68,10 +62,6 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
 	 */
 	bl->bgid = bgid;
 	smp_store_release(&bl->is_ready, 1);
-
-	if (bgid < BGID_ARRAY)
-		return 0;
-
 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
 }
 
@@ -208,24 +198,6 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 	return ret;
 }
 
-static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
-{
-	struct io_buffer_list *bl;
-	int i;
-
-	bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
-	if (!bl)
-		return -ENOMEM;
-
-	for (i = 0; i < BGID_ARRAY; i++) {
-		INIT_LIST_HEAD(&bl[i].buf_list);
-		bl[i].bgid = i;
-	}
-
-	smp_store_release(&ctx->io_bl, bl);
-	return 0;
-}
-
 /*
  * Mark the given mapped range as free for reuse
  */
@@ -300,13 +272,6 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
 	struct list_head *item, *tmp;
 	struct io_buffer *buf;
 	unsigned long index;
-	int i;
-
-	for (i = 0; i < BGID_ARRAY; i++) {
-		if (!ctx->io_bl)
-			break;
-		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
-	}
 
 	xa_for_each(&ctx->io_bl_xa, index, bl) {
 		xa_erase(&ctx->io_bl_xa, bl->bgid);
@@ -489,12 +454,6 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 
 	io_ring_submit_lock(ctx, issue_flags);
 
-	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
-		ret = io_init_bl_list(ctx);
-		if (ret)
-			goto err;
-	}
-
 	bl = io_buffer_get_list(ctx, p->bgid);
 	if (unlikely(!bl)) {
 		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
@@ -507,14 +466,9 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 		if (ret) {
 			/*
 			 * Doesn't need rcu free as it was never visible, but
-			 * let's keep it consistent throughout. Also can't
-			 * be a lower indexed array group, as adding one
-			 * where lookup failed cannot happen.
+			 * let's keep it consistent throughout.
 			 */
-			if (p->bgid >= BGID_ARRAY)
-				kfree_rcu(bl, rcu);
-			else
-				WARN_ON_ONCE(1);
+			kfree_rcu(bl, rcu);
 			goto err;
 		}
 	}
@@ -679,12 +633,6 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 	if (reg.ring_entries >= 65536)
 		return -EINVAL;
 
-	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
-		int ret = io_init_bl_list(ctx);
-		if (ret)
-			return ret;
-	}
-
 	bl = io_buffer_get_list(ctx, reg.bgid);
 	if (bl) {
 		/* if mapped buffer ring OR classic exists, don't allow */
@@ -734,10 +682,8 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 		return -EINVAL;
 
 	__io_remove_buffers(ctx, bl, -1U);
-	if (bl->bgid >= BGID_ARRAY) {
-		xa_erase(&ctx->io_bl_xa, bl->bgid);
-		kfree_rcu(bl, rcu);
-	}
+	xa_erase(&ctx->io_bl_xa, bl->bgid);
+	kfree_rcu(bl, rcu);
 	return 0;
 }
 
@@ -771,7 +717,7 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
 {
 	struct io_buffer_list *bl;
 
-	bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
+	bl = __io_buffer_get_list(ctx, bgid);
 
 	if (!bl || !bl->is_mmap)
 		return NULL;
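
The hunks above only change kernel-internal bookkeeping: the lower-indexed BGID_ARRAY fast path is gone and every buffer group ID is now tracked in the ctx->io_bl_xa xarray. The userspace ABI is untouched. As a minimal, hedged sketch (not part of this commit) of the path that reaches io_register_pbuf_ring() and io_unregister_pbuf_ring() patched above, a liburing caller might look like the following; the buffer group ID 7, ring size 8, and 4096-byte page size are illustrative assumptions.

/*
 * Hedged illustration only: registers and unregisters a provided buffer
 * ring, which dispatches to io_register_pbuf_ring()/io_unregister_pbuf_ring()
 * in the kernel. Any bgid value now takes the same xarray-backed path.
 */
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_buf_ring *br;
	struct io_uring_buf_reg reg = { 0 };
	int ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* shared ring memory must be page aligned; 4096 assumed here */
	if (posix_memalign((void **)&br, 4096, 8 * sizeof(struct io_uring_buf)))
		return 1;

	reg.ring_addr = (unsigned long)br;
	reg.ring_entries = 8;	/* power of two, below the 65536 limit checked above */
	reg.bgid = 7;		/* arbitrary group ID; stored via io_buffer_add_list() */

	ret = io_uring_register_buf_ring(&ring, &reg, 0);
	if (ret) {
		fprintf(stderr, "register_buf_ring: %d\n", ret);
		return 1;
	}

	/* tears down the group: xa_erase() + kfree_rcu() in the kernel */
	io_uring_unregister_buf_ring(&ring, 7);
	io_uring_queue_exit(&ring);
	free(br);
	return 0;
}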