@@ -916,7 +916,6 @@ void io_send_zc_cleanup(struct io_kiocb *req)
 		kfree(io->free_iov);
 	}
 	if (zc->notif) {
-		zc->notif->flags |= REQ_F_CQE_SKIP;
 		io_notif_flush(zc->notif);
 		zc->notif = NULL;
 	}
@@ -1047,7 +1046,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 	struct msghdr msg;
 	struct iovec iov;
 	struct socket *sock;
-	unsigned msg_flags, cflags;
+	unsigned msg_flags;
 	int ret, min_ret = 0;
 
 	sock = sock_from_file(req->file);
@@ -1115,8 +1114,6 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 			req->flags |= REQ_F_PARTIAL_IO;
 			return io_setup_async_addr(req, &__address, issue_flags);
 		}
-		if (ret < 0 && !zc->done_io)
-			zc->notif->flags |= REQ_F_CQE_SKIP;
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
 		req_set_fail(req);
@@ -1129,8 +1126,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 
 	io_notif_flush(zc->notif);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
-	io_req_set_res(req, ret, cflags);
+	io_req_set_res(req, ret, IORING_CQE_F_MORE);
 	return IOU_OK;
 }
 
@@ -1139,7 +1135,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct io_async_msghdr iomsg, *kmsg;
 	struct socket *sock;
-	unsigned flags, cflags;
+	unsigned flags;
 	int ret, min_ret = 0;
 
 	sock = sock_from_file(req->file);
@@ -1178,8 +1174,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 			req->flags |= REQ_F_PARTIAL_IO;
 			return io_setup_async_msg(req, kmsg, issue_flags);
 		}
-		if (ret < 0 && !sr->done_io)
-			sr->notif->flags |= REQ_F_CQE_SKIP;
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
 		req_set_fail(req);
@@ -1196,27 +1190,20 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 
 	io_notif_flush(sr->notif);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
-	io_req_set_res(req, ret, cflags);
+	io_req_set_res(req, ret, IORING_CQE_F_MORE);
 	return IOU_OK;
 }
 
 void io_sendrecv_fail(struct io_kiocb *req)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	int res = req->cqe.res;
 
 	if (req->flags & REQ_F_PARTIAL_IO)
-		res = sr->done_io;
+		req->cqe.res = sr->done_io;
+
 	if ((req->flags & REQ_F_NEED_CLEANUP) &&
-	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC)) {
-		/* preserve notification for partial I/O */
-		if (res < 0)
-			sr->notif->flags |= REQ_F_CQE_SKIP;
-		io_notif_flush(sr->notif);
-		sr->notif = NULL;
-	}
-	io_req_set_res(req, res, req->cqe.flags);
+	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
+		req->cqe.flags |= IORING_CQE_F_MORE;
 }
 
 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
0 commit comments