@@ -5607,44 +5607,11 @@ static inline void io_queue_link_head(struct io_kiocb *req)
5607
5607
io_queue_sqe (req , NULL );
5608
5608
}
5609
5609
5610
- #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
5611
- IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
5612
- IOSQE_BUFFER_SELECT)
5613
-
5614
5610
static int io_submit_sqe (struct io_kiocb * req , const struct io_uring_sqe * sqe ,
5615
5611
struct io_submit_state * state , struct io_kiocb * * link )
5616
5612
{
5617
5613
struct io_ring_ctx * ctx = req -> ctx ;
5618
- unsigned int sqe_flags ;
5619
- int ret , id , fd ;
5620
-
5621
- sqe_flags = READ_ONCE (sqe -> flags );
5622
-
5623
- /* enforce forwards compatibility on users */
5624
- if (unlikely (sqe_flags & ~SQE_VALID_FLAGS ))
5625
- return - EINVAL ;
5626
-
5627
- if ((sqe_flags & IOSQE_BUFFER_SELECT ) &&
5628
- !io_op_defs [req -> opcode ].buffer_select )
5629
- return - EOPNOTSUPP ;
5630
-
5631
- id = READ_ONCE (sqe -> personality );
5632
- if (id ) {
5633
- req -> work .creds = idr_find (& ctx -> personality_idr , id );
5634
- if (unlikely (!req -> work .creds ))
5635
- return - EINVAL ;
5636
- get_cred (req -> work .creds );
5637
- }
5638
-
5639
- /* same numerical values with corresponding REQ_F_*, safe to copy */
5640
- req -> flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
5641
- IOSQE_ASYNC | IOSQE_FIXED_FILE |
5642
- IOSQE_BUFFER_SELECT | IOSQE_IO_LINK );
5643
-
5644
- fd = READ_ONCE (sqe -> fd );
5645
- ret = io_req_set_file (state , req , fd , sqe_flags );
5646
- if (unlikely (ret ))
5647
- return ret ;
5614
+ int ret ;
5648
5615
5649
5616
/*
5650
5617
* If we already have a head request, queue this one for async
@@ -5663,7 +5630,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5663
5630
* next after the link request. The last one is done via
5664
5631
* drain_next flag to persist the effect across calls.
5665
5632
*/
5666
- if (sqe_flags & IOSQE_IO_DRAIN ) {
5633
+ if (req -> flags & REQ_F_IO_DRAIN ) {
5667
5634
head -> flags |= REQ_F_IO_DRAIN ;
5668
5635
ctx -> drain_next = 1 ;
5669
5636
}
@@ -5680,16 +5647,16 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5680
5647
list_add_tail (& req -> link_list , & head -> link_list );
5681
5648
5682
5649
/* last request of a link, enqueue the link */
5683
- if (!(sqe_flags & (IOSQE_IO_LINK | IOSQE_IO_HARDLINK ))) {
5650
+ if (!(req -> flags & (REQ_F_LINK | REQ_F_HARDLINK ))) {
5684
5651
io_queue_link_head (head );
5685
5652
* link = NULL ;
5686
5653
}
5687
5654
} else {
5688
5655
if (unlikely (ctx -> drain_next )) {
5689
5656
req -> flags |= REQ_F_IO_DRAIN ;
5690
- req -> ctx -> drain_next = 0 ;
5657
+ ctx -> drain_next = 0 ;
5691
5658
}
5692
- if (sqe_flags & (IOSQE_IO_LINK | IOSQE_IO_HARDLINK )) {
5659
+ if (req -> flags & (REQ_F_LINK | REQ_F_HARDLINK )) {
5693
5660
req -> flags |= REQ_F_LINK_HEAD ;
5694
5661
INIT_LIST_HEAD (& req -> link_list );
5695
5662
@@ -5779,9 +5746,17 @@ static inline void io_consume_sqe(struct io_ring_ctx *ctx)
5779
5746
ctx -> cached_sq_head ++ ;
5780
5747
}
5781
5748
5782
- static void io_init_req (struct io_ring_ctx * ctx , struct io_kiocb * req ,
5783
- const struct io_uring_sqe * sqe )
5749
+ #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
5750
+ IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
5751
+ IOSQE_BUFFER_SELECT)
5752
+
5753
+ static int io_init_req (struct io_ring_ctx * ctx , struct io_kiocb * req ,
5754
+ const struct io_uring_sqe * sqe ,
5755
+ struct io_submit_state * state , bool async )
5784
5756
{
5757
+ unsigned int sqe_flags ;
5758
+ int id , fd ;
5759
+
5785
5760
/*
5786
5761
* All io need record the previous position, if LINK vs DRAIN,
5787
5762
* it can be used to mark the position of the first IO in the
@@ -5798,7 +5773,42 @@ static void io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
5798
5773
refcount_set (& req -> refs , 2 );
5799
5774
req -> task = NULL ;
5800
5775
req -> result = 0 ;
5776
+ req -> needs_fixed_file = async ;
5801
5777
INIT_IO_WORK (& req -> work , io_wq_submit_work );
5778
+
5779
+ if (unlikely (req -> opcode >= IORING_OP_LAST ))
5780
+ return - EINVAL ;
5781
+
5782
+ if (io_op_defs [req -> opcode ].needs_mm && !current -> mm ) {
5783
+ if (unlikely (!mmget_not_zero (ctx -> sqo_mm )))
5784
+ return - EFAULT ;
5785
+ use_mm (ctx -> sqo_mm );
5786
+ }
5787
+
5788
+ sqe_flags = READ_ONCE (sqe -> flags );
5789
+ /* enforce forwards compatibility on users */
5790
+ if (unlikely (sqe_flags & ~SQE_VALID_FLAGS ))
5791
+ return - EINVAL ;
5792
+
5793
+ if ((sqe_flags & IOSQE_BUFFER_SELECT ) &&
5794
+ !io_op_defs [req -> opcode ].buffer_select )
5795
+ return - EOPNOTSUPP ;
5796
+
5797
+ id = READ_ONCE (sqe -> personality );
5798
+ if (id ) {
5799
+ req -> work .creds = idr_find (& ctx -> personality_idr , id );
5800
+ if (unlikely (!req -> work .creds ))
5801
+ return - EINVAL ;
5802
+ get_cred (req -> work .creds );
5803
+ }
5804
+
5805
+ /* same numerical values with corresponding REQ_F_*, safe to copy */
5806
+ req -> flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
5807
+ IOSQE_ASYNC | IOSQE_FIXED_FILE |
5808
+ IOSQE_BUFFER_SELECT | IOSQE_IO_LINK );
5809
+
5810
+ fd = READ_ONCE (sqe -> fd );
5811
+ return io_req_set_file (state , req , fd , sqe_flags );
5802
5812
}
5803
5813
5804
5814
static int io_submit_sqes (struct io_ring_ctx * ctx , unsigned int nr ,
@@ -5846,28 +5856,18 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
5846
5856
break ;
5847
5857
}
5848
5858
5849
- io_init_req (ctx , req , sqe );
5859
+ err = io_init_req (ctx , req , sqe , statep , async );
5850
5860
io_consume_sqe (ctx );
5851
5861
/* will complete beyond this point, count as submitted */
5852
5862
submitted ++ ;
5853
5863
5854
- if (unlikely (req -> opcode >= IORING_OP_LAST )) {
5855
- err = - EINVAL ;
5864
+ if (unlikely (err )) {
5856
5865
fail_req :
5857
5866
io_cqring_add_event (req , err );
5858
5867
io_double_put_req (req );
5859
5868
break ;
5860
5869
}
5861
5870
5862
- if (io_op_defs [req -> opcode ].needs_mm && !current -> mm ) {
5863
- if (unlikely (!mmget_not_zero (ctx -> sqo_mm ))) {
5864
- err = - EFAULT ;
5865
- goto fail_req ;
5866
- }
5867
- use_mm (ctx -> sqo_mm );
5868
- }
5869
-
5870
- req -> needs_fixed_file = async ;
5871
5871
trace_io_uring_submit_sqe (ctx , req -> opcode , req -> user_data ,
5872
5872
true, async );
5873
5873
err = io_submit_sqe (req , sqe , statep , & link );
0 commit comments