@@ -449,10 +449,18 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 	if (IS_ERR(n.rings))
 		return PTR_ERR(n.rings);
 
-	n.rings->sq_ring_mask = p.sq_entries - 1;
-	n.rings->cq_ring_mask = p.cq_entries - 1;
-	n.rings->sq_ring_entries = p.sq_entries;
-	n.rings->cq_ring_entries = p.cq_entries;
+	/*
+	 * At this point n.rings is shared with userspace, just like o.rings
+	 * is as well. While we don't expect userspace to modify it while
+	 * a resize is in progress, and it's most likely that userspace will
+	 * shoot itself in the foot if it does, we can't always assume good
+	 * intent... Use read/write once helpers from here on to indicate the
+	 * shared nature of it.
+	 */
+	WRITE_ONCE(n.rings->sq_ring_mask, p.sq_entries - 1);
+	WRITE_ONCE(n.rings->cq_ring_mask, p.cq_entries - 1);
+	WRITE_ONCE(n.rings->sq_ring_entries, p.sq_entries);
+	WRITE_ONCE(n.rings->cq_ring_entries, p.cq_entries);
 
 	if (copy_to_user(arg, &p, sizeof(p))) {
 		io_register_free_rings(&p, &n);
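The hunk above replaces plain stores to the freshly mapped ring header with WRITE_ONCE(), since n.rings is already visible to userspace at this point. A minimal userspace sketch of the idea, assuming GCC/Clang __typeof__ and an invented shared_hdr struct (neither is from the patch): the volatile cast pins each field to exactly one untorn store and stops the compiler from fusing, splitting, or re-issuing the accesses.

#include <stdint.h>

/* Userspace shims modeled on the kernel helpers: a volatile access
 * forces a single, untorn memory operation per use. */
#define READ_ONCE(x)      (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

/* Hypothetical stand-in for the shared ring header. */
struct shared_hdr {
	uint32_t sq_ring_mask;
	uint32_t sq_ring_entries;
};

/* Publish ring geometry into memory another party may read at any
 * time: one store per field, no compiler-invented extra accesses. */
static void publish_geometry(struct shared_hdr *hdr, uint32_t entries)
{
	WRITE_ONCE(hdr->sq_ring_mask, entries - 1);
	WRITE_ONCE(hdr->sq_ring_entries, entries);
}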
@@ -509,20 +517,20 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 	 * rings can't hold what is already there, then fail the operation.
 	 */
 	n.sq_sqes = ptr;
-	tail = o.rings->sq.tail;
-	if (tail - o.rings->sq.head > p.sq_entries)
+	tail = READ_ONCE(o.rings->sq.tail);
+	if (tail - READ_ONCE(o.rings->sq.head) > p.sq_entries)
 		goto overflow;
-	for (i = o.rings->sq.head; i < tail; i++) {
+	for (i = READ_ONCE(o.rings->sq.head); i < tail; i++) {
 		unsigned src_head = i & (ctx->sq_entries - 1);
 		unsigned dst_head = i & (p.sq_entries - 1);
 
 		n.sq_sqes[dst_head] = o.sq_sqes[src_head];
 	}
-	n.rings->sq.head = o.rings->sq.head;
-	n.rings->sq.tail = o.rings->sq.tail;
+	WRITE_ONCE(n.rings->sq.head, READ_ONCE(o.rings->sq.head));
+	WRITE_ONCE(n.rings->sq.tail, READ_ONCE(o.rings->sq.tail));
 
-	tail = o.rings->cq.tail;
-	if (tail - o.rings->cq.head > p.cq_entries) {
+	tail = READ_ONCE(o.rings->cq.tail);
+	if (tail - READ_ONCE(o.rings->cq.head) > p.cq_entries) {
 overflow:
 		/* restore old rings, and return -EOVERFLOW via cleanup path */
 		ctx->rings = o.rings;
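The SQ copy above snapshots tail with READ_ONCE() before the bounds check, but note that head is still read twice, once for the check and once as the loop start, so a hostile userspace could in principle move it between the two reads. A hardened sketch, reusing the READ_ONCE shim above and with every name invented here rather than taken from the patch, reads both indices exactly once and then works only on the local copies. Since head and tail are free-running counters, the unsigned subtraction tail - head yields the fill level even across wraparound, which is also why this sketch compares with != rather than <.

#include <errno.h>
#include <stdint.h>

struct entry { uint64_t data; };	/* stand-in for an SQE/CQE */

/* Snapshot the shared indices once, validate the occupied count
 * against the new ring, then copy each in-flight slot to its position
 * under the new mask. Both entry counts must be powers of two. */
static int copy_ring(struct entry *dst, unsigned dst_entries,
		     const struct entry *src, unsigned src_entries,
		     const unsigned *shared_head, const unsigned *shared_tail)
{
	unsigned head = READ_ONCE(*shared_head);
	unsigned tail = READ_ONCE(*shared_tail);
	unsigned i;

	/* unsigned math handles counters that have wrapped past UINT_MAX */
	if (tail - head > dst_entries)
		return -EOVERFLOW;

	for (i = head; i != tail; i++)
		dst[i & (dst_entries - 1)] = src[i & (src_entries - 1)];
	return 0;
}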
@@ -531,21 +539,21 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 		ret = -EOVERFLOW;
 		goto out;
 	}
-	for (i = o.rings->cq.head; i < tail; i++) {
+	for (i = READ_ONCE(o.rings->cq.head); i < tail; i++) {
 		unsigned src_head = i & (ctx->cq_entries - 1);
 		unsigned dst_head = i & (p.cq_entries - 1);
 
 		n.rings->cqes[dst_head] = o.rings->cqes[src_head];
 	}
-	n.rings->cq.head = o.rings->cq.head;
-	n.rings->cq.tail = o.rings->cq.tail;
+	WRITE_ONCE(n.rings->cq.head, READ_ONCE(o.rings->cq.head));
+	WRITE_ONCE(n.rings->cq.tail, READ_ONCE(o.rings->cq.tail));
 	/* invalidate cached cqe refill */
 	ctx->cqe_cached = ctx->cqe_sentinel = NULL;
 
-	n.rings->sq_dropped = o.rings->sq_dropped;
-	n.rings->sq_flags = o.rings->sq_flags;
-	n.rings->cq_flags = o.rings->cq_flags;
-	n.rings->cq_overflow = o.rings->cq_overflow;
+	WRITE_ONCE(n.rings->sq_dropped, READ_ONCE(o.rings->sq_dropped));
+	WRITE_ONCE(n.rings->sq_flags, READ_ONCE(o.rings->sq_flags));
+	WRITE_ONCE(n.rings->cq_flags, READ_ONCE(o.rings->cq_flags));
+	WRITE_ONCE(n.rings->cq_overflow, READ_ONCE(o.rings->cq_overflow));
 
 	/* all done, store old pointers and assign new ones */
 	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
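The final hunk moves the remaining bookkeeping fields with one READ_ONCE()/WRITE_ONCE() pair each, so a concurrently scribbling userspace can at worst inject a stale value into the new ring, never a torn or half-updated one. As a usage check for the copy_ring() sketch above (an illustration built on the hypothetical types from the earlier blocks, not kernel behavior), a resize re-homes each in-flight entry under the new mask, and the free-running indices keep working even when they wrap past UINT_MAX:

#include <stdio.h>

int main(void)
{
	struct entry old_ring[4] = { {1}, {2}, {3}, {4} };
	struct entry new_ring[8] = { {0} };
	/* free-running indices: two entries in flight, about to wrap */
	unsigned head = 4294967294u;	/* UINT_MAX - 1 */
	unsigned tail = head + 2;	/* wraps around to 0 */

	if (copy_ring(new_ring, 8, old_ring, 4, &head, &tail))
		return 1;

	/* prints "3 4": slots 2 and 3 of the old ring land in slots 6
	 * and 7 of the new one */
	printf("%llu %llu\n",
	       (unsigned long long)new_ring[head & 7].data,
	       (unsigned long long)new_ring[(head + 1) & 7].data);
	return 0;
}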