@@ -785,53 +785,61 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
 	return ret;
 }
 
-static enum resp_states atomic_write_reply(struct rxe_qp *qp,
-					   struct rxe_pkt_info *pkt)
+#ifdef CONFIG_64BIT
+static enum resp_states do_atomic_write(struct rxe_qp *qp,
+					struct rxe_pkt_info *pkt)
 {
-	u64 src, *dst;
-	struct resp_res *res = qp->resp.res;
 	struct rxe_mr *mr = qp->resp.mr;
 	int payload = payload_size(pkt);
+	u64 src, *dst;
 
-	if (!res) {
-		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
-		qp->resp.res = res;
-	}
-
-	if (!res->replay) {
-#ifdef CONFIG_64BIT
-		if (mr->state != RXE_MR_STATE_VALID)
-			return RESPST_ERR_RKEY_VIOLATION;
-
-		memcpy(&src, payload_addr(pkt), payload);
+	if (mr->state != RXE_MR_STATE_VALID)
+		return RESPST_ERR_RKEY_VIOLATION;
 
-		dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
-		/* check vaddr is 8 bytes aligned. */
-		if (!dst || (uintptr_t)dst & 7)
-			return RESPST_ERR_MISALIGNED_ATOMIC;
+	memcpy(&src, payload_addr(pkt), payload);
 
-		/* Do atomic write after all prior operations have completed */
-		smp_store_release(dst, src);
+	dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
+	/* check vaddr is 8 bytes aligned. */
+	if (!dst || (uintptr_t)dst & 7)
+		return RESPST_ERR_MISALIGNED_ATOMIC;
 
-		/* decrease resp.resid to zero */
-		qp->resp.resid -= sizeof(payload);
+	/* Do atomic write after all prior operations have completed */
+	smp_store_release(dst, src);
 
-		qp->resp.msn++;
+	/* decrease resp.resid to zero */
+	qp->resp.resid -= sizeof(payload);
 
-		/* next expected psn, read handles this separately */
-		qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
-		qp->resp.ack_psn = qp->resp.psn;
+	qp->resp.msn++;
 
-		qp->resp.opcode = pkt->opcode;
-		qp->resp.status = IB_WC_SUCCESS;
+	/* next expected psn, read handles this separately */
+	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+	qp->resp.ack_psn = qp->resp.psn;
 
-		return RESPST_ACKNOWLEDGE;
+	qp->resp.opcode = pkt->opcode;
+	qp->resp.status = IB_WC_SUCCESS;
+	return RESPST_ACKNOWLEDGE;
+}
 #else
-		return RESPST_ERR_UNSUPPORTED_OPCODE;
+static enum resp_states do_atomic_write(struct rxe_qp *qp,
+					struct rxe_pkt_info *pkt)
+{
+	return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
 #endif /* CONFIG_64BIT */
+
+static enum resp_states atomic_write_reply(struct rxe_qp *qp,
+					   struct rxe_pkt_info *pkt)
+{
+	struct resp_res *res = qp->resp.res;
+
+	if (!res) {
+		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
+		qp->resp.res = res;
 	}
 
-	return RESPST_ACKNOWLEDGE;
+	if (res->replay)
+		return RESPST_ACKNOWLEDGE;
+	return do_atomic_write(qp, pkt);
 }
 
 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
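
The core of the new do_atomic_write() is an 8-byte release store: the responder copies the 8-byte ATOMIC WRITE payload into a u64, verifies the destination address is 8-byte aligned, and publishes the value with smp_store_release() so all earlier writes are visible before it lands. Below is a minimal userspace sketch of that idiom using C11 atomics in place of the kernel's smp_store_release(); the names demo_atomic_write and slot are hypothetical, not from the patch.

#include <stdint.h>
#include <string.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical userspace analogue of do_atomic_write(): returns 0 on
 * success, -1 if dst is not 8-byte aligned (the kernel path returns
 * RESPST_ERR_MISALIGNED_ATOMIC for that case instead).
 */
static int demo_atomic_write(_Atomic uint64_t *dst, const void *payload)
{
	uint64_t src;

	/* same check as the patch: low three address bits must be zero */
	if ((uintptr_t)dst & 7)
		return -1;

	/* copy the 8-byte wire payload into a correctly typed local */
	memcpy(&src, payload, sizeof(src));

	/* release store: prior stores become visible before this one,
	 * mirroring smp_store_release(dst, src) in the kernel code
	 */
	atomic_store_explicit(dst, src, memory_order_release);
	return 0;
}

int main(void)
{
	_Atomic uint64_t slot = 0;
	uint64_t wire = 0x1122334455667788ULL;

	if (demo_atomic_write(&slot, &wire) == 0)
		printf("stored 0x%llx\n",
		       (unsigned long long)atomic_load(&slot));
	return 0;
}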