@@ -206,7 +206,9 @@ struct smb_direct_recvmsg {
 struct smb_direct_rdma_rw_msg {
 	struct smb_direct_transport	*t;
 	struct ib_cqe		cqe;
+	int			status;
 	struct completion	*completion;
+	struct list_head	list;
 	struct rdma_rw_ctx	rw_ctx;
 	struct sg_table		sgt;
 	struct scatterlist	sg_list[];
@@ -1311,6 +1313,16 @@ static int smb_direct_writev(struct ksmbd_transport *t,
 	return ret;
 }
 
+static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t,
+					struct smb_direct_rdma_rw_msg *msg,
+					enum dma_data_direction dir)
+{
+	rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
+			    msg->sgt.sgl, msg->sgt.nents, dir);
+	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+	kfree(msg);
+}
+
 static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
 			    enum dma_data_direction dir)
 {
@@ -1319,19 +1331,14 @@ static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
 	struct smb_direct_transport *t = msg->t;
 
 	if (wc->status != IB_WC_SUCCESS) {
+		msg->status = -EIO;
 		pr_err("read/write error. opcode = %d, status = %s(%d)\n",
 		       wc->opcode, ib_wc_status_msg(wc->status), wc->status);
-		smb_direct_disconnect_rdma_connection(t);
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			smb_direct_disconnect_rdma_connection(t);
 	}
 
-	if (atomic_inc_return(&t->rw_credits) > 0)
-		wake_up(&t->wait_rw_credits);
-
-	rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
-			    msg->sg_list, msg->sgt.nents, dir);
-	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
 	complete(msg->completion);
-	kfree(msg);
 }
 
 static void read_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1350,75 +1357,116 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
 				unsigned int desc_len,
 				bool is_read)
 {
-	struct smb_direct_rdma_rw_msg *msg;
-	int ret;
+	struct smb_direct_rdma_rw_msg *msg, *next_msg;
+	int i, ret;
 	DECLARE_COMPLETION_ONSTACK(completion);
-	struct ib_send_wr *first_wr = NULL;
-	u32 remote_key = le32_to_cpu(desc[0].token);
-	u64 remote_offset = le64_to_cpu(desc[0].offset);
+	struct ib_send_wr *first_wr;
+	LIST_HEAD(msg_list);
+	char *desc_buf;
 	int credits_needed;
+	unsigned int desc_buf_len;
+	size_t total_length = 0;
+
+	if (t->status != SMB_DIRECT_CS_CONNECTED)
+		return -ENOTCONN;
+
+	/* calculate needed credits */
+	credits_needed = 0;
+	desc_buf = buf;
+	for (i = 0; i < desc_len / sizeof(*desc); i++) {
+		desc_buf_len = le32_to_cpu(desc[i].length);
+
+		credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len);
+		desc_buf += desc_buf_len;
+		total_length += desc_buf_len;
+		if (desc_buf_len == 0 || total_length > buf_len ||
+		    total_length > t->max_rdma_rw_size)
+			return -EINVAL;
+	}
+
+	ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n",
+		    is_read ? "read" : "write", buf_len, credits_needed);
 
-	credits_needed = calc_rw_credits(t, buf, buf_len);
 	ret = wait_for_rw_credits(t, credits_needed);
 	if (ret < 0)
 		return ret;
 
-	/* TODO: mempool */
-	msg = kmalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
-		      sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
-	if (!msg) {
-		atomic_add(credits_needed, &t->rw_credits);
-		return -ENOMEM;
-	}
+	/* build rdma_rw_ctx for each descriptor */
+	desc_buf = buf;
+	for (i = 0; i < desc_len / sizeof(*desc); i++) {
+		msg = kzalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
+			      sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
+		if (!msg) {
+			ret = -ENOMEM;
+			goto out;
+		}
 
-	msg->sgt.sgl = &msg->sg_list[0];
-	ret = sg_alloc_table_chained(&msg->sgt,
-				     get_buf_page_count(buf, buf_len),
-				     msg->sg_list, SG_CHUNK_SIZE);
-	if (ret) {
-		atomic_add(credits_needed, &t->rw_credits);
-		kfree(msg);
-		return -ENOMEM;
-	}
+		desc_buf_len = le32_to_cpu(desc[i].length);
 
-	ret = get_sg_list(buf, buf_len, msg->sgt.sgl, msg->sgt.orig_nents);
-	if (ret <= 0) {
-		pr_err("failed to get pages\n");
-		goto err;
-	}
+		msg->t = t;
+		msg->cqe.done = is_read ? read_done : write_done;
+		msg->completion = &completion;
 
-	ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
-			       msg->sg_list, get_buf_page_count(buf, buf_len),
-			       0, remote_offset, remote_key,
-			       is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
-	if (ret < 0) {
-		pr_err("failed to init rdma_rw_ctx: %d\n", ret);
-		goto err;
+		msg->sgt.sgl = &msg->sg_list[0];
+		ret = sg_alloc_table_chained(&msg->sgt,
+					     get_buf_page_count(desc_buf, desc_buf_len),
+					     msg->sg_list, SG_CHUNK_SIZE);
+		if (ret) {
+			kfree(msg);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ret = get_sg_list(desc_buf, desc_buf_len,
+				  msg->sgt.sgl, msg->sgt.orig_nents);
+		if (ret < 0) {
+			sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+			kfree(msg);
+			goto out;
+		}
+
+		ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
+				       msg->sgt.sgl,
+				       get_buf_page_count(desc_buf, desc_buf_len),
+				       0,
+				       le64_to_cpu(desc[i].offset),
+				       le32_to_cpu(desc[i].token),
+				       is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+		if (ret < 0) {
+			pr_err("failed to init rdma_rw_ctx: %d\n", ret);
+			sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+			kfree(msg);
+			goto out;
+		}
+
+		list_add_tail(&msg->list, &msg_list);
+		desc_buf += desc_buf_len;
 	}
 
-	msg->t = t;
-	msg->cqe.done = is_read ? read_done : write_done;
-	msg->completion = &completion;
-	first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
-				   &msg->cqe, NULL);
+	/* concatenate work requests of rdma_rw_ctxs */
+	first_wr = NULL;
+	list_for_each_entry_reverse(msg, &msg_list, list) {
+		first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
+					   &msg->cqe, first_wr);
+	}
 
 	ret = ib_post_send(t->qp, first_wr, NULL);
 	if (ret) {
-		pr_err("failed to post send wr: %d\n", ret);
-		goto err;
+		pr_err("failed to post send wr for RDMA R/W: %d\n", ret);
+		goto out;
 	}
 
+	msg = list_last_entry(&msg_list, struct smb_direct_rdma_rw_msg, list);
 	wait_for_completion(&completion);
-	return 0;
-
-err:
+	ret = msg->status;
+out:
+	list_for_each_entry_safe(msg, next_msg, &msg_list, list) {
+		list_del(&msg->list);
+		smb_direct_free_rdma_rw_msg(t, msg,
+					    is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+	}
 	atomic_add(credits_needed, &t->rw_credits);
-	if (first_wr)
-		rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
-				    msg->sg_list, msg->sgt.nents,
-				    is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
-	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
-	kfree(msg);
+	wake_up(&t->wait_rw_credits);
 	return ret;
 }
 