@@ -23,6 +23,7 @@ enum resp_states {
 	RESPST_READ_REPLY,
 	RESPST_ATOMIC_REPLY,
 	RESPST_ATOMIC_WRITE_REPLY,
+	RESPST_PROCESS_FLUSH,
 	RESPST_COMPLETE,
 	RESPST_ACKNOWLEDGE,
 	RESPST_CLEANUP,
@@ -59,6 +60,7 @@ static char *resp_state_name[] = {
 	[RESPST_READ_REPLY]			= "READ_REPLY",
 	[RESPST_ATOMIC_REPLY]			= "ATOMIC_REPLY",
 	[RESPST_ATOMIC_WRITE_REPLY]		= "ATOMIC_WRITE_REPLY",
+	[RESPST_PROCESS_FLUSH]			= "PROCESS_FLUSH",
 	[RESPST_COMPLETE]			= "COMPLETE",
 	[RESPST_ACKNOWLEDGE]			= "ACKNOWLEDGE",
 	[RESPST_CLEANUP]			= "CLEANUP",
@@ -258,19 +260,37 @@ static enum resp_states check_op_seq(struct rxe_qp *qp,
 	}
 }
 
+static bool check_qp_attr_access(struct rxe_qp *qp,
+				 struct rxe_pkt_info *pkt)
+{
+	if (((pkt->mask & RXE_READ_MASK) &&
+	     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
+	    ((pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) &&
+	     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
+	    ((pkt->mask & RXE_ATOMIC_MASK) &&
+	     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+		return false;
+
+	if (pkt->mask & RXE_FLUSH_MASK) {
+		u32 flush_type = feth_plt(pkt);
+
+		if ((flush_type & IB_FLUSH_GLOBAL &&
+		     !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_GLOBAL)) ||
+		    (flush_type & IB_FLUSH_PERSISTENT &&
+		     !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_PERSISTENT)))
+			return false;
+	}
+
+	return true;
+}
+
 static enum resp_states check_op_valid(struct rxe_qp *qp,
 				       struct rxe_pkt_info *pkt)
 {
 	switch (qp_type(qp)) {
 	case IB_QPT_RC:
-		if (((pkt->mask & RXE_READ_MASK) &&
-		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
-		    ((pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) &&
-		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
-		    ((pkt->mask & RXE_ATOMIC_MASK) &&
-		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
+		if (!check_qp_attr_access(qp, pkt))
 			return RESPST_ERR_UNSUPPORTED_OPCODE;
-		}
 
 		break;
 
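Reviewer note: the new check_qp_attr_access() helper requires that every placement type requested in the FETH (read via feth_plt()) is also enabled in the QP's access flags, alongside the pre-existing READ/WRITE/ATOMIC permission checks it absorbs from check_op_valid(). A minimal standalone model of that placement-type mapping is sketched below; the enum values and the flush_allowed() helper are hypothetical stand-ins, not the kernel's IB_FLUSH_* / IB_ACCESS_FLUSH_* definitions.

```c
/* Minimal model of the FETH placement-type check performed by
 * check_qp_attr_access(). Names and values are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>

enum { FLUSH_GLOBAL = 1 << 0, FLUSH_PERSISTENT = 1 << 1 };               /* FETH PLT bits  */
enum { ACCESS_FLUSH_GLOBAL = 1 << 0, ACCESS_FLUSH_PERSISTENT = 1 << 1 }; /* QP access bits */

/* A FLUSH is allowed only if every placement type it requests is enabled
 * in the QP's access flags; otherwise the responder rejects the opcode. */
static bool flush_allowed(uint32_t flush_type, uint32_t qp_access_flags)
{
	if ((flush_type & FLUSH_GLOBAL) && !(qp_access_flags & ACCESS_FLUSH_GLOBAL))
		return false;
	if ((flush_type & FLUSH_PERSISTENT) && !(qp_access_flags & ACCESS_FLUSH_PERSISTENT))
		return false;
	return true;
}
```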
@@ -437,6 +457,23 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
 	return RESPST_CHK_RKEY;
 }
 
+static void qp_resp_from_reth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+{
+	qp->resp.va = reth_va(pkt);
+	qp->resp.offset = 0;
+	qp->resp.rkey = reth_rkey(pkt);
+	qp->resp.resid = reth_len(pkt);
+	qp->resp.length = reth_len(pkt);
+}
+
+static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+{
+	qp->resp.va = atmeth_va(pkt);
+	qp->resp.offset = 0;
+	qp->resp.rkey = atmeth_rkey(pkt);
+	qp->resp.resid = sizeof(u64);
+}
+
 static enum resp_states check_rkey(struct rxe_qp *qp,
 				   struct rxe_pkt_info *pkt)
 {
@@ -448,23 +485,26 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 	u32 pktlen;
 	int mtu = qp->mtu;
 	enum resp_states state;
-	int access;
+	int access = 0;
 
 	if (pkt->mask & (RXE_READ_OR_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) {
-		if (pkt->mask & RXE_RETH_MASK) {
-			qp->resp.va = reth_va(pkt);
-			qp->resp.offset = 0;
-			qp->resp.rkey = reth_rkey(pkt);
-			qp->resp.resid = reth_len(pkt);
-			qp->resp.length = reth_len(pkt);
-		}
+		if (pkt->mask & RXE_RETH_MASK)
+			qp_resp_from_reth(qp, pkt);
+
 		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
+	} else if (pkt->mask & RXE_FLUSH_MASK) {
+		u32 flush_type = feth_plt(pkt);
+
+		if (pkt->mask & RXE_RETH_MASK)
+			qp_resp_from_reth(qp, pkt);
+
+		if (flush_type & IB_FLUSH_GLOBAL)
+			access |= IB_ACCESS_FLUSH_GLOBAL;
+		if (flush_type & IB_FLUSH_PERSISTENT)
+			access |= IB_ACCESS_FLUSH_PERSISTENT;
 	} else if (pkt->mask & RXE_ATOMIC_MASK) {
-		qp->resp.va = atmeth_va(pkt);
-		qp->resp.offset = 0;
-		qp->resp.rkey = atmeth_rkey(pkt);
-		qp->resp.resid = sizeof(u64);
+		qp_resp_from_atmeth(qp, pkt);
 		access = IB_ACCESS_REMOTE_ATOMIC;
 	} else {
 		return RESPST_EXECUTE;
@@ -511,11 +551,20 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 		}
 	}
 
+	if (pkt->mask & RXE_FLUSH_MASK) {
+		/* An MR-level FLUSH may not set va or resid; no need to
+		 * check the range since the whole MR is flushed.
+		 */
+		if (feth_sel(pkt) == IB_FLUSH_MR)
+			goto skip_check_range;
+	}
+
 	if (mr_check_range(mr, va + qp->resp.offset, resid)) {
 		state = RESPST_ERR_RKEY_VIOLATION;
 		goto err;
 	}
 
+skip_check_range:
 	if (pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) {
 		if (resid > mtu) {
 			if (pktlen != mtu || bth_pad(pkt)) {
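For context on the skip_check_range path above: a FLUSH with MR selectivity (feth_sel() == IB_FLUSH_MR) targets the whole memory region, so the usual virtual-address range check is bypassed; only a range-level FLUSH validates the requested span against the MR. A small model of that decision is shown below; the struct and enum names are hypothetical stand-ins for the rxe MR and the IB_FLUSH_RANGE/IB_FLUSH_MR levels.

```c
/* Model of the range decision check_rkey() now makes for FLUSH requests.
 * Names are hypothetical; only the decision logic mirrors the patch.
 */
#include <stdbool.h>
#include <stdint.h>

enum flush_level { FLUSH_RANGE = 0, FLUSH_MR = 1 };

struct mr_span {
	uint64_t iova;    /* start of the registered region */
	uint64_t length;  /* size of the registered region  */
};

/* An MR-level flush always passes (the whole MR is flushed); a range-level
 * flush must lie entirely inside the registered region, analogous to what
 * mr_check_range() enforces in the kernel. */
static bool flush_range_ok(enum flush_level level, const struct mr_span *mr,
			   uint64_t va, uint64_t resid)
{
	if (level == FLUSH_MR)
		return true;
	return va >= mr->iova && va + resid <= mr->iova + mr->length;
}
```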
@@ -621,11 +670,61 @@ static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
 		res->last_psn = pkt->psn;
 		res->cur_psn = pkt->psn;
 		break;
+	case RXE_FLUSH_MASK:
+		res->flush.va = qp->resp.va + qp->resp.offset;
+		res->flush.length = qp->resp.length;
+		res->flush.type = feth_plt(pkt);
+		res->flush.level = feth_sel(pkt);
 	}
 
 	return res;
 }
 
+static enum resp_states process_flush(struct rxe_qp *qp,
+				      struct rxe_pkt_info *pkt)
+{
+	u64 length, start;
+	struct rxe_mr *mr = qp->resp.mr;
+	struct resp_res *res = qp->resp.res;
+
+	/* oA19-14, oA19-15 */
+	if (res && res->replay)
+		return RESPST_ACKNOWLEDGE;
+	else if (!res) {
+		res = rxe_prepare_res(qp, pkt, RXE_FLUSH_MASK);
+		qp->resp.res = res;
+	}
+
+	if (res->flush.level == IB_FLUSH_RANGE) {
+		start = res->flush.va;
+		length = res->flush.length;
+	} else { /* level == IB_FLUSH_MR */
+		start = mr->ibmr.iova;
+		length = mr->ibmr.length;
+	}
+
+	if (res->flush.type & IB_FLUSH_PERSISTENT) {
+		if (rxe_flush_pmem_iova(mr, start, length))
+			return RESPST_ERR_RKEY_VIOLATION;
+		/* Make data persistent. */
+		wmb();
+	} else if (res->flush.type & IB_FLUSH_GLOBAL) {
+		/* Make data globally visible. */
+		wmb();
+	}
+
+	qp->resp.msn++;
+
+	/* next expected psn, read handles this separately */
+	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+	qp->resp.ack_psn = qp->resp.psn;
+
+	qp->resp.opcode = pkt->opcode;
+	qp->resp.status = IB_WC_SUCCESS;
+
+	return RESPST_ACKNOWLEDGE;
+}
+
 /* Guarantee atomicity of atomic operations at the machine level. */
 static DEFINE_SPINLOCK(atomic_ops_lock);
 
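process_flush() is the heart of the new state: it picks the flush span from the saved selectivity level, then acts on the placement type (a persistent flush calls rxe_flush_pmem_iova() followed by a write barrier, while a global-visibility flush only needs the barrier) before acknowledging. The sketch below models that flow in isolation; flush_persistent_range() and write_barrier() are hypothetical stubs standing in for the kernel's rxe_flush_pmem_iova() and wmb(), not the real API.

```c
/* Compact model of the core of process_flush(); only the span/placement
 * logic mirrors the patch, the helpers are illustrative stubs.
 */
#include <stdint.h>

enum { FLUSH_GLOBAL = 1 << 0, FLUSH_PERSISTENT = 1 << 1 };  /* placement type */
enum { FLUSH_RANGE = 0, FLUSH_MR = 1 };                     /* selectivity    */

struct mr_model { uint64_t iova; uint64_t length; };

static int flush_persistent_range(struct mr_model *mr, uint64_t start, uint64_t len)
{
	(void)mr; (void)start; (void)len;
	return 0;  /* stub: write CPU caches back for the iova range */
}

static void write_barrier(void)
{
	/* stub: order the flush against the subsequent ACK */
}

static int do_flush(struct mr_model *mr, uint32_t type, uint32_t level,
		    uint64_t req_va, uint64_t req_len)
{
	/* An MR-level flush ignores the requested span and covers the whole MR. */
	uint64_t start = (level == FLUSH_MR) ? mr->iova : req_va;
	uint64_t len = (level == FLUSH_MR) ? mr->length : req_len;

	if (type & FLUSH_PERSISTENT) {
		if (flush_persistent_range(mr, start, len))
			return -1;   /* maps to RESPST_ERR_RKEY_VIOLATION */
		write_barrier();     /* make the data persistent */
	} else if (type & FLUSH_GLOBAL) {
		write_barrier();     /* make the data globally visible */
	}
	return 0;                    /* responder then ACKs the request */
}
```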
@@ -980,6 +1079,8 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
 		return RESPST_ATOMIC_REPLY;
 	} else if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
 		return RESPST_ATOMIC_WRITE_REPLY;
+	} else if (pkt->mask & RXE_FLUSH_MASK) {
+		return RESPST_PROCESS_FLUSH;
 	} else {
 		/* Unreachable */
 		WARN_ON_ONCE(1);
@@ -1176,7 +1277,7 @@ static enum resp_states acknowledge(struct rxe_qp *qp,
 		send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
 	else if (pkt->mask & RXE_ATOMIC_MASK)
 		send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
-	else if (pkt->mask & RXE_ATOMIC_WRITE_MASK)
+	else if (pkt->mask & (RXE_FLUSH_MASK | RXE_ATOMIC_WRITE_MASK))
 		send_read_response_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
 	else if (bth_ack(pkt))
 		send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
@@ -1234,6 +1335,22 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
 		/* SEND. Ack again and cleanup. C9-105. */
 		send_ack(qp, AETH_ACK_UNLIMITED, prev_psn);
 		return RESPST_CLEANUP;
+	} else if (pkt->mask & RXE_FLUSH_MASK) {
+		struct resp_res *res;
+
+		/* Find the operation in our list of responder resources. */
+		res = find_resource(qp, pkt->psn);
+		if (res) {
+			res->replay = 1;
+			res->cur_psn = pkt->psn;
+			qp->resp.res = res;
+			rc = RESPST_PROCESS_FLUSH;
+			goto out;
+		}
+
+		/* Resource not found. Class D error. Drop the request. */
+		rc = RESPST_CLEANUP;
+		goto out;
 	} else if (pkt->mask & RXE_READ_MASK) {
 		struct resp_res *res;
 
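A retransmitted FLUSH is handled here by looking up the responder resource saved for its PSN and marking it as a replay, so process_flush() acknowledges it again without repeating the flush (the oA19-14/oA19-15 rules cited in the code); an unknown PSN is a Class D error and the packet is dropped. A toy model of that decision follows; the resource table and lookup are hypothetical stand-ins for the rxe responder resources searched by find_resource().

```c
/* Toy model of the FLUSH replay decision in duplicate_request(). */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct flush_res { uint32_t psn; bool replay; };

enum dup_action { REPLAY_ACK, DROP_PACKET };

static enum dup_action on_duplicate_flush(struct flush_res *res, size_t nres,
					  uint32_t psn)
{
	for (size_t i = 0; i < nres; i++) {
		if (res[i].psn == psn) {
			res[i].replay = true;  /* process_flush() will just re-ACK */
			return REPLAY_ACK;
		}
	}
	return DROP_PACKET;  /* Class D error: no saved resource for this PSN */
}
```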
@@ -1431,6 +1548,9 @@ int rxe_responder(void *arg)
 		case RESPST_ATOMIC_WRITE_REPLY:
 			state = atomic_write_reply(qp, pkt);
 			break;
+		case RESPST_PROCESS_FLUSH:
+			state = process_flush(qp, pkt);
+			break;
 		case RESPST_ACKNOWLEDGE:
 			state = acknowledge(qp, pkt);
 			break;