@@ -17,8 +17,6 @@
 
 #define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
 
-#define BGID_ARRAY	64
-
 /* BIDs are addressed by a 16-bit field in a CQE */
 #define MAX_BIDS_PER_BGID (1 << 16)
 
@@ -40,13 +38,9 @@ struct io_buf_free {
 	int				inuse;
 };
 
-static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
-						   struct io_buffer_list *bl,
-						   unsigned int bgid)
+static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
+							   unsigned int bgid)
 {
-	if (bl && bgid < BGID_ARRAY)
-		return &bl[bgid];
-
 	return xa_load(&ctx->io_bl_xa, bgid);
 }
 
@@ -55,7 +49,7 @@ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 {
 	lockdep_assert_held(&ctx->uring_lock);
 
-	return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
+	return __io_buffer_get_list(ctx, bgid);
 }
 
 static int io_buffer_add_list(struct io_ring_ctx *ctx,
@@ -68,10 +62,6 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
 	 */
 	bl->bgid = bgid;
 	smp_store_release(&bl->is_ready, 1);
-
-	if (bgid < BGID_ARRAY)
-		return 0;
-
 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
 }
 
@@ -208,24 +198,6 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 	return ret;
 }
 
-static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
-{
-	struct io_buffer_list *bl;
-	int i;
-
-	bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
-	if (!bl)
-		return -ENOMEM;
-
-	for (i = 0; i < BGID_ARRAY; i++) {
-		INIT_LIST_HEAD(&bl[i].buf_list);
-		bl[i].bgid = i;
-	}
-
-	smp_store_release(&ctx->io_bl, bl);
-	return 0;
-}
-
 /*
  * Mark the given mapped range as free for reuse
  */
@@ -300,13 +272,6 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
 	struct list_head *item, *tmp;
 	struct io_buffer *buf;
 	unsigned long index;
-	int i;
-
-	for (i = 0; i < BGID_ARRAY; i++) {
-		if (!ctx->io_bl)
-			break;
-		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
-	}
 
 	xa_for_each(&ctx->io_bl_xa, index, bl) {
 		xa_erase(&ctx->io_bl_xa, bl->bgid);
@@ -489,12 +454,6 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 
 	io_ring_submit_lock(ctx, issue_flags);
 
-	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
-		ret = io_init_bl_list(ctx);
-		if (ret)
-			goto err;
-	}
-
 	bl = io_buffer_get_list(ctx, p->bgid);
 	if (unlikely(!bl)) {
 		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
@@ -507,14 +466,9 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 		if (ret) {
 			/*
 			 * Doesn't need rcu free as it was never visible, but
-			 * let's keep it consistent throughout. Also can't
-			 * be a lower indexed array group, as adding one
-			 * where lookup failed cannot happen.
+			 * let's keep it consistent throughout.
 			 */
-			if (p->bgid >= BGID_ARRAY)
-				kfree_rcu(bl, rcu);
-			else
-				WARN_ON_ONCE(1);
+			kfree_rcu(bl, rcu);
 			goto err;
 		}
 	}
@@ -679,12 +633,6 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 	if (reg.ring_entries >= 65536)
 		return -EINVAL;
 
-	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
-		int ret = io_init_bl_list(ctx);
-		if (ret)
-			return ret;
-	}
-
 	bl = io_buffer_get_list(ctx, reg.bgid);
 	if (bl) {
 		/* if mapped buffer ring OR classic exists, don't allow */
@@ -734,10 +682,8 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 		return -EINVAL;
 
 	__io_remove_buffers(ctx, bl, -1U);
-	if (bl->bgid >= BGID_ARRAY) {
-		xa_erase(&ctx->io_bl_xa, bl->bgid);
-		kfree_rcu(bl, rcu);
-	}
+	xa_erase(&ctx->io_bl_xa, bl->bgid);
+	kfree_rcu(bl, rcu);
 	return 0;
 }
 
@@ -771,7 +717,7 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
 {
 	struct io_buffer_list *bl;
 
-	bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
+	bl = __io_buffer_get_list(ctx, bgid);
 
 	if (!bl || !bl->is_mmap)
 		return NULL;