@@ -449,10 +449,18 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 	if (IS_ERR(n.rings))
 		return PTR_ERR(n.rings);
 
-	n.rings->sq_ring_mask = p.sq_entries - 1;
-	n.rings->cq_ring_mask = p.cq_entries - 1;
-	n.rings->sq_ring_entries = p.sq_entries;
-	n.rings->cq_ring_entries = p.cq_entries;
+	/*
+	 * At this point n.rings is shared with userspace, just like o.rings
+	 * is as well. While we don't expect userspace to modify it while
+	 * a resize is in progress, and it's most likely that userspace will
+	 * shoot itself in the foot if it does, we can't always assume good
+	 * intent... Use read/write once helpers from here on to indicate the
+	 * shared nature of it.
+	 */
+	WRITE_ONCE(n.rings->sq_ring_mask, p.sq_entries - 1);
+	WRITE_ONCE(n.rings->cq_ring_mask, p.cq_entries - 1);
+	WRITE_ONCE(n.rings->sq_ring_entries, p.sq_entries);
+	WRITE_ONCE(n.rings->cq_ring_entries, p.cq_entries);
 
 	if (copy_to_user(arg, &p, sizeof(p))) {
 		io_register_free_rings(&p, &n);
@@ -509,20 +517,20 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 	 * rings can't hold what is already there, then fail the operation.
 	 */
 	n.sq_sqes = ptr;
-	tail = o.rings->sq.tail;
-	if (tail - o.rings->sq.head > p.sq_entries)
+	tail = READ_ONCE(o.rings->sq.tail);
+	if (tail - READ_ONCE(o.rings->sq.head) > p.sq_entries)
 		goto overflow;
-	for (i = o.rings->sq.head; i < tail; i++) {
+	for (i = READ_ONCE(o.rings->sq.head); i < tail; i++) {
 		unsigned src_head = i & (ctx->sq_entries - 1);
 		unsigned dst_head = i & (p.sq_entries - 1);
 
 		n.sq_sqes[dst_head] = o.sq_sqes[src_head];
 	}
-	n.rings->sq.head = o.rings->sq.head;
-	n.rings->sq.tail = o.rings->sq.tail;
+	WRITE_ONCE(n.rings->sq.head, READ_ONCE(o.rings->sq.head));
+	WRITE_ONCE(n.rings->sq.tail, READ_ONCE(o.rings->sq.tail));
 
-	tail = o.rings->cq.tail;
-	if (tail - o.rings->cq.head > p.cq_entries) {
+	tail = READ_ONCE(o.rings->cq.tail);
+	if (tail - READ_ONCE(o.rings->cq.head) > p.cq_entries) {
 overflow:
 		/* restore old rings, and return -EOVERFLOW via cleanup path */
 		ctx->rings = o.rings;
@@ -531,21 +539,21 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 		ret = -EOVERFLOW;
 		goto out;
 	}
-	for (i = o.rings->cq.head; i < tail; i++) {
+	for (i = READ_ONCE(o.rings->cq.head); i < tail; i++) {
 		unsigned src_head = i & (ctx->cq_entries - 1);
 		unsigned dst_head = i & (p.cq_entries - 1);
 
 		n.rings->cqes[dst_head] = o.rings->cqes[src_head];
 	}
-	n.rings->cq.head = o.rings->cq.head;
-	n.rings->cq.tail = o.rings->cq.tail;
+	WRITE_ONCE(n.rings->cq.head, READ_ONCE(o.rings->cq.head));
+	WRITE_ONCE(n.rings->cq.tail, READ_ONCE(o.rings->cq.tail));
 
 	/* invalidate cached cqe refill */
 	ctx->cqe_cached = ctx->cqe_sentinel = NULL;
 
-	n.rings->sq_dropped = o.rings->sq_dropped;
-	n.rings->sq_flags = o.rings->sq_flags;
-	n.rings->cq_flags = o.rings->cq_flags;
-	n.rings->cq_overflow = o.rings->cq_overflow;
+	WRITE_ONCE(n.rings->sq_dropped, READ_ONCE(o.rings->sq_dropped));
+	WRITE_ONCE(n.rings->sq_flags, READ_ONCE(o.rings->sq_flags));
+	WRITE_ONCE(n.rings->cq_flags, READ_ONCE(o.rings->cq_flags));
+	WRITE_ONCE(n.rings->cq_overflow, READ_ONCE(o.rings->cq_overflow));
 
 	/* all done, store old pointers and assign new ones */
 	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
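
For context, here is a minimal sketch (not part of the patch, and not io_uring code) of the READ_ONCE()/WRITE_ONCE() pattern the diff adopts for the old (o.rings) and new (n.rings) mappings that userspace can also touch: each access to the shared memory becomes a single, non-torn load or store that the compiler may not repeat or split. The struct and helper names below are illustrative assumptions.

/*
 * Illustrative sketch only -- demo_shared_ring and its helpers are
 * hypothetical, not taken from io_uring. They show the pattern of
 * accessing memory shared with userspace via READ_ONCE()/WRITE_ONCE().
 */
#include <linux/compiler.h>
#include <linux/types.h>

struct demo_shared_ring {
	u32 head;	/* advanced by one side of the mapping */
	u32 tail;	/* advanced by the other side */
};

/*
 * Snapshot both indices exactly once. The other side of the mapping may
 * modify them concurrently, so each field is read with a single load the
 * compiler cannot tear, fuse, or re-issue later.
 */
static u32 demo_ring_used(struct demo_shared_ring *r)
{
	u32 head = READ_ONCE(r->head);
	u32 tail = READ_ONCE(r->tail);

	return tail - head;	/* unsigned arithmetic handles wraparound */
}

/*
 * Publish a value into the shared mapping as one non-torn store. Note
 * that WRITE_ONCE() adds no memory barriers; it only constrains the
 * compiler, not CPU ordering.
 */
static void demo_ring_set_tail(struct demo_shared_ring *r, u32 tail)
{
	WRITE_ONCE(r->tail, tail);
}

As the comment added in the first hunk says, the helpers in this patch mainly document the shared nature of the ring memory and defend against a userspace that modifies it mid-resize, rather than provide any ordering guarantees.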