@@ -97,6 +97,11 @@ struct io_recvzc {
 	struct io_zcrx_ifq		*ifq;
 };
 
+static int io_sg_from_iter_iovec(struct sk_buff *skb,
+				 struct iov_iter *from, size_t length);
+static int io_sg_from_iter(struct sk_buff *skb,
+			   struct iov_iter *from, size_t length);
+
 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
@@ -1266,6 +1271,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_async_msghdr *iomsg;
 	struct io_kiocb *notif;
 	int ret;
 
@@ -1308,8 +1314,15 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (io_is_compat(req->ctx))
 		zc->msg_flags |= MSG_CMSG_COMPAT;
 
-	if (unlikely(!io_msg_alloc_async(req)))
+	iomsg = io_msg_alloc_async(req);
+	if (unlikely(!iomsg))
 		return -ENOMEM;
+
+	if (zc->flags & IORING_RECVSEND_FIXED_BUF)
+		iomsg->msg.sg_from_iter = io_sg_from_iter;
+	else
+		iomsg->msg.sg_from_iter = io_sg_from_iter_iovec;
+
 	if (req->opcode == IORING_OP_SEND_ZC) {
 		req->flags |= REQ_F_IMPORT_BUFFER;
 		return io_send_setup(req, sqe);
@@ -1320,11 +1333,8 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (unlikely(ret))
 		return ret;
 
-	if (!(zc->flags & IORING_RECVSEND_FIXED_BUF)) {
-		struct io_async_msghdr *iomsg = req->async_data;
-
+	if (!(zc->flags & IORING_RECVSEND_FIXED_BUF))
 		return io_notif_account_mem(zc->notif, iomsg->msg.msg_iter.count);
-	}
 	return 0;
 }
 
@@ -1391,15 +1401,13 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
 					ITER_SOURCE, issue_flags);
 		if (unlikely(ret))
 			return ret;
-		kmsg->msg.sg_from_iter = io_sg_from_iter;
 	} else {
 		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
 		if (unlikely(ret))
 			return ret;
 		ret = io_notif_account_mem(sr->notif, sr->len);
 		if (unlikely(ret))
 			return ret;
-		kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
 	}
 
 	return ret;
@@ -1483,8 +1491,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 	unsigned flags;
 	int ret, min_ret = 0;
 
-	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
-
 	if (req->flags & REQ_F_IMPORT_BUFFER) {
 		unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs;
 		int ret;
@@ -1493,7 +1499,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 						&kmsg->vec, uvec_segs, issue_flags);
 		if (unlikely(ret))
 			return ret;
-		kmsg->msg.sg_from_iter = io_sg_from_iter;
 		req->flags &= ~REQ_F_IMPORT_BUFFER;
 	}
 