@@ -13,8 +13,22 @@ static enum io_uring_op ublk_to_uring_op(const struct ublksrv_io_desc *iod, int
13
13
assert (0 );
14
14
}
15
15
16
+ static int loop_queue_flush_io (struct ublk_queue * q , const struct ublksrv_io_desc * iod , int tag )
17
+ {
18
+ unsigned ublk_op = ublksrv_get_op (iod );
19
+ struct io_uring_sqe * sqe [1 ];
20
+
21
+ ublk_queue_alloc_sqes (q , sqe , 1 );
22
+ io_uring_prep_fsync (sqe [0 ], 1 /*fds[1]*/ , IORING_FSYNC_DATASYNC );
23
+ io_uring_sqe_set_flags (sqe [0 ], IOSQE_FIXED_FILE );
24
+ /* bit63 marks us as tgt io */
25
+ sqe [0 ]-> user_data = build_user_data (tag , ublk_op , 0 , 1 );
26
+ return 1 ;
27
+ }
28
+
16
29
static int loop_queue_tgt_rw_io (struct ublk_queue * q , const struct ublksrv_io_desc * iod , int tag )
17
30
{
31
+ unsigned ublk_op = ublksrv_get_op (iod );
18
32
int zc = ublk_queue_use_zc (q );
19
33
enum io_uring_op op = ublk_to_uring_op (iod , zc );
20
34
struct io_uring_sqe * sqe [3 ];
@@ -29,98 +43,87 @@ static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_de
29
43
iod -> nr_sectors << 9 ,
30
44
iod -> start_sector << 9 );
31
45
io_uring_sqe_set_flags (sqe [0 ], IOSQE_FIXED_FILE );
32
- q -> io_inflight ++ ;
33
46
/* bit63 marks us as tgt io */
34
- sqe [0 ]-> user_data = build_user_data (tag , op , UBLK_IO_TGT_NORMAL , 1 );
35
- return 0 ;
47
+ sqe [0 ]-> user_data = build_user_data (tag , ublk_op , 0 , 1 );
48
+ return 1 ;
36
49
}
37
50
38
51
ublk_queue_alloc_sqes (q , sqe , 3 );
39
52
40
53
io_uring_prep_buf_register (sqe [0 ], 0 , tag , q -> q_id , tag );
41
- sqe [0 ]-> user_data = build_user_data ( tag , 0xfe , 1 , 1 ) ;
42
- sqe [0 ]-> flags |= IOSQE_CQE_SKIP_SUCCESS ;
43
- sqe [0 ]-> flags |= IOSQE_IO_LINK ;
54
+ sqe [0 ]-> flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK ;
55
+ sqe [0 ]-> user_data = build_user_data ( tag ,
56
+ ublk_cmd_op_nr ( sqe [0 ]-> cmd_op ), 0 , 1 ) ;
44
57
45
58
io_uring_prep_rw (op , sqe [1 ], 1 /*fds[1]*/ , 0 ,
46
59
iod -> nr_sectors << 9 ,
47
60
iod -> start_sector << 9 );
48
61
sqe [1 ]-> buf_index = tag ;
49
- sqe [1 ]-> flags |= IOSQE_FIXED_FILE ;
50
- sqe [1 ]-> flags |= IOSQE_IO_LINK ;
51
- sqe [1 ]-> user_data = build_user_data (tag , op , UBLK_IO_TGT_ZC_OP , 1 );
52
- q -> io_inflight ++ ;
62
+ sqe [1 ]-> flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK ;
63
+ sqe [1 ]-> user_data = build_user_data (tag , ublk_op , 0 , 1 );
53
64
54
65
io_uring_prep_buf_unregister (sqe [2 ], 0 , tag , q -> q_id , tag );
55
- sqe [2 ]-> user_data = build_user_data (tag , 0xff , UBLK_IO_TGT_ZC_BUF , 1 );
56
- q -> io_inflight ++ ;
66
+ sqe [2 ]-> user_data = build_user_data (tag , ublk_cmd_op_nr (sqe [2 ]-> cmd_op ), 0 , 1 );
57
67
58
- return 0 ;
68
+ return 2 ;
59
69
}
60
70
61
71
static int loop_queue_tgt_io (struct ublk_queue * q , int tag )
62
72
{
63
73
const struct ublksrv_io_desc * iod = ublk_get_iod (q , tag );
64
74
unsigned ublk_op = ublksrv_get_op (iod );
65
- struct io_uring_sqe * sqe [ 1 ] ;
75
+ int ret ;
66
76
67
77
switch (ublk_op ) {
68
78
case UBLK_IO_OP_FLUSH :
69
- ublk_queue_alloc_sqes (q , sqe , 1 );
70
- if (!sqe [0 ])
71
- return - ENOMEM ;
72
- io_uring_prep_fsync (sqe [0 ], 1 /*fds[1]*/ , IORING_FSYNC_DATASYNC );
73
- io_uring_sqe_set_flags (sqe [0 ], IOSQE_FIXED_FILE );
74
- q -> io_inflight ++ ;
75
- sqe [0 ]-> user_data = build_user_data (tag , ublk_op , UBLK_IO_TGT_NORMAL , 1 );
79
+ ret = loop_queue_flush_io (q , iod , tag );
76
80
break ;
77
81
case UBLK_IO_OP_WRITE_ZEROES :
78
82
case UBLK_IO_OP_DISCARD :
79
- return - ENOTSUP ;
83
+ ret = - ENOTSUP ;
84
+ break ;
80
85
case UBLK_IO_OP_READ :
81
86
case UBLK_IO_OP_WRITE :
82
- loop_queue_tgt_rw_io (q , iod , tag );
87
+ ret = loop_queue_tgt_rw_io (q , iod , tag );
83
88
break ;
84
89
default :
85
- return - EINVAL ;
90
+ ret = - EINVAL ;
91
+ break ;
86
92
}
87
93
88
94
ublk_dbg (UBLK_DBG_IO , "%s: tag %d ublk io %x %llx %u\n" , __func__ , tag ,
89
95
iod -> op_flags , iod -> start_sector , iod -> nr_sectors << 9 );
90
- return 1 ;
96
+ return ret ;
91
97
}
92
98
93
99
/*
 * Queue-io entry point for the loop target: hand the request to the
 * dispatcher and record how many target ios were queued (or the error)
 * via ublk_queued_tgt_io().
 */
static int ublk_loop_queue_io(struct ublk_queue *q, int tag)
{
	ublk_queued_tgt_io(q, tag, loop_queue_tgt_io(q, tag));
	return 0;
}
102
106
103
107
static void ublk_loop_io_done (struct ublk_queue * q , int tag ,
104
108
const struct io_uring_cqe * cqe )
105
109
{
106
- int cqe_tag = user_data_to_tag (cqe -> user_data );
107
- unsigned tgt_data = user_data_to_tgt_data (cqe -> user_data );
108
- int res = cqe -> res ;
110
+ unsigned op = user_data_to_op (cqe -> user_data );
111
+ struct ublk_io * io = ublk_get_io (q , tag );
112
+
113
+ if (cqe -> res < 0 || op != ublk_cmd_op_nr (UBLK_U_IO_UNREGISTER_IO_BUF )) {
114
+ if (!io -> result )
115
+ io -> result = cqe -> res ;
116
+ if (cqe -> res < 0 )
117
+ ublk_err ("%s: io failed op %x user_data %lx\n" ,
118
+ __func__ , op , cqe -> user_data );
119
+ }
109
120
110
- if (res < 0 || tgt_data == UBLK_IO_TGT_NORMAL )
111
- goto complete ;
121
+ /* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
122
+ if (op == ublk_cmd_op_nr (UBLK_U_IO_REGISTER_IO_BUF ))
123
+ io -> tgt_ios += 1 ;
112
124
113
- if (tgt_data == UBLK_IO_TGT_ZC_OP ) {
114
- ublk_set_io_res (q , tag , cqe -> res );
115
- goto exit ;
116
- }
117
- assert (tgt_data == UBLK_IO_TGT_ZC_BUF );
118
- res = ublk_get_io_res (q , tag );
119
- complete :
120
- assert (tag == cqe_tag );
121
- ublk_complete_io (q , tag , res );
122
- exit :
123
- q -> io_inflight -- ;
125
+ if (ublk_completed_tgt_io (q , tag ))
126
+ ublk_complete_io (q , tag , io -> result );
124
127
}
125
128
126
129
static int ublk_loop_tgt_init (const struct dev_ctx * ctx , struct ublk_dev * dev )
0 commit comments