@@ -5608,7 +5608,7 @@ static inline void io_queue_link_head(struct io_kiocb *req)
 				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
 				IOSQE_BUFFER_SELECT)
 
-static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			  struct io_submit_state *state, struct io_kiocb **link)
 {
 	struct io_ring_ctx *ctx = req->ctx;
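
The first hunk is the heart of the change: io_submit_sqe() stops returning a bool "keep submitting" flag and adopts the usual kernel convention of 0 on success and a negative errno on failure. A minimal, self-contained userspace sketch of that convention (illustrative only, not kernel code):

	#include <errno.h>
	#include <stdio.h>

	/* Returns 0 on success, a negative errno on failure -- the
	 * convention io_submit_sqe() adopts in this patch. */
	static int submit_one(int fd)
	{
		if (fd < 0)
			return -EINVAL;
		return 0;
	}

	int main(void)
	{
		int err = submit_one(-1);

		if (err)
			printf("submit failed: %d\n", err);	/* prints -22 */
		return 0;
	}
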
@@ -5618,24 +5618,18 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	sqe_flags = READ_ONCE(sqe->flags);
 
 	/* enforce forwards compatibility on users */
-	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
-		ret = -EINVAL;
-		goto err_req;
-	}
+	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
+		return -EINVAL;
 
 	if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
-	    !io_op_defs[req->opcode].buffer_select) {
-		ret = -EOPNOTSUPP;
-		goto err_req;
-	}
+	    !io_op_defs[req->opcode].buffer_select)
+		return -EOPNOTSUPP;
 
 	id = READ_ONCE(sqe->personality);
 	if (id) {
 		req->work.creds = idr_find(&ctx->personality_idr, id);
-		if (unlikely(!req->work.creds)) {
-			ret = -EINVAL;
-			goto err_req;
-		}
+		if (unlikely(!req->work.creds))
+			return -EINVAL;
 		get_cred(req->work.creds);
 	}
 
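
A side note on the validation hunk above: the SQE lives in the submission ring, memory userspace can modify concurrently, so each field is snapshotted exactly once with READ_ONCE() and every check runs against the snapshot. A rough userspace approximation of the macro (the kernel's real READ_ONCE() is more elaborate):

	#include <stdio.h>

	/* Rough stand-in for the kernel's READ_ONCE(): a volatile access
	 * forces a single load, so a concurrent writer cannot make two
	 * checks of the "same" value observe different data. */
	#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

	int main(void)
	{
		unsigned int flags = 0x7;
		unsigned int sqe_flags = READ_ONCE(flags);	/* one load */

		/* later tests use the snapshot, never flags itself */
		printf("snapshot: %#x\n", sqe_flags);
		return 0;
	}
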
@@ -5646,12 +5640,8 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	fd = READ_ONCE(sqe->fd);
 	ret = io_req_set_file(state, req, fd, sqe_flags);
-	if (unlikely(ret)) {
-err_req:
-		io_cqring_add_event(req, ret);
-		io_double_put_req(req);
-		return false;
-	}
+	if (unlikely(ret))
+		return ret;
 
 	/*
 	 * If we already have a head request, queue this one for async
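
This hunk deletes the shared err_req label, and with it the function's habit of consuming its own errors: the old path posted a completion with io_cqring_add_event() and dropped both request references with io_double_put_req() before returning false. After the patch, every former goto err_req site (including the two in the later hunks) simply returns the errno, and the cleanup becomes the caller's job. A self-contained sketch of that refactor pattern, using hypothetical stand-in names (complete_with_error() is illustrative, not an io_uring API):

	#include <errno.h>
	#include <stdio.h>

	struct request { int id; };

	/* Stand-in for what err_req used to do inside the callee:
	 * post the failed completion and drop the request. */
	static void complete_with_error(struct request *req, int err)
	{
		printf("req %d completed with %d\n", req->id, err);
	}

	/* After: the callee only reports; it never completes the request. */
	static int submit(struct request *req, int flags)
	{
		if (flags & ~1)
			return -EINVAL;
		return 0;
	}

	/* The caller owns the failure path, as io_submit_sqes() now does
	 * via its fail_req label. */
	static void submit_all(struct request *reqs, int n)
	{
		for (int i = 0; i < n; i++) {
			int err = submit(&reqs[i], i);

			if (err) {
				complete_with_error(&reqs[i], err);
				break;
			}
		}
	}

	int main(void)
	{
		struct request reqs[] = { {0}, {1}, {2} };

		submit_all(reqs, 3);	/* req 2 fails with -EINVAL */
		return 0;
	}
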
@@ -5674,16 +5664,14 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			head->flags |= REQ_F_IO_DRAIN;
 			ctx->drain_next = 1;
 		}
-		if (io_alloc_async_ctx(req)) {
-			ret = -EAGAIN;
-			goto err_req;
-		}
+		if (io_alloc_async_ctx(req))
+			return -EAGAIN;
 
 		ret = io_req_defer_prep(req, sqe);
 		if (ret) {
 			/* fail even hard links since we don't submit */
 			head->flags |= REQ_F_FAIL_LINK;
-			goto err_req;
+			return ret;
 		}
 		trace_io_uring_link(ctx, req, head);
 		list_add_tail(&req->link_list, &head->link_list);
@@ -5702,10 +5690,9 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			req->flags |= REQ_F_LINK;
 			INIT_LIST_HEAD(&req->link_list);
 
-			if (io_alloc_async_ctx(req)) {
-				ret = -EAGAIN;
-				goto err_req;
-			}
+			if (io_alloc_async_ctx(req))
+				return -EAGAIN;
+
 			ret = io_req_defer_prep(req, sqe);
 			if (ret)
 				req->flags |= REQ_F_FAIL_LINK;
@@ -5715,7 +5702,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		}
 	}
 
-	return true;
+	return 0;
 }
 
 /*
@@ -5880,8 +5867,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		req->needs_fixed_file = async;
 		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
 						true, async);
-		if (!io_submit_sqe(req, sqe, statep, &link))
-			break;
+		err = io_submit_sqe(req, sqe, statep, &link);
+		if (err)
+			goto fail_req;
 	}
 
 	if (unlikely(submitted != nr)) {
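
The final hunk adjusts the only caller. Where the loop used to break on a false return, with the request already completed by the callee, it now jumps to a fail_req label. That label's body (and the declaration of the err local) sits outside these hunks; presumably it mirrors the deleted err_req code, posting the error CQE for the request and double-putting it, so the observable behavior for a bad SQE stays the same while the error flow becomes explicit.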