@@ -91,10 +91,11 @@ static u32 to_hr_opcode(u32 ib_opcode)
 }
 
 static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
-			 void *wqe, const struct ib_reg_wr *wr)
+			 const struct ib_reg_wr *wr)
 {
+	struct hns_roce_wqe_frmr_seg *fseg =
+		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
 	struct hns_roce_mr *mr = to_hr_mr(wr->mr);
-	struct hns_roce_wqe_frmr_seg *fseg = wqe;
 	u64 pbl_ba;
 
 	/* use ib_access_flags */
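
Note: set_frmr_seg() now derives the FRMR segment address from rc_sq_wqe itself instead of receiving a pre-advanced cursor. A minimal sketch of the arithmetic the reworked helpers share; the helper name wqe_seg_addr() is illustrative only, not part of this patch:

	/* Illustrative only: each extra segment of an RC send WQE starts
	 * right after the fixed-size base header, so its address can be
	 * recomputed from the WQE base pointer instead of threading a
	 * separate 'wqe' cursor through every helper.
	 */
	static void *wqe_seg_addr(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe)
	{
		return (void *)rc_sq_wqe + sizeof(*rc_sq_wqe);
	}
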
@@ -128,22 +129,24 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
 			 V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
 }
 
-static void set_atomic_seg(const struct ib_send_wr *wr, void *wqe,
+static void set_atomic_seg(const struct ib_send_wr *wr,
 			   struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
 			   unsigned int valid_num_sge)
 {
-	struct hns_roce_wqe_atomic_seg *aseg;
+	struct hns_roce_v2_wqe_data_seg *dseg =
+		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
+	struct hns_roce_wqe_atomic_seg *aseg =
+		(void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);
 
-	set_data_seg_v2(wqe, wr->sg_list);
-	aseg = wqe + sizeof(struct hns_roce_v2_wqe_data_seg);
+	set_data_seg_v2(dseg, wr->sg_list);
 
 	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
 		aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
 		aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
 	} else {
 		aseg->fetchadd_swap_data =
 			cpu_to_le64(atomic_wr(wr)->compare_add);
-		aseg->cmp_data  = 0;
+		aseg->cmp_data = 0;
 	}
 
 	roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
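
Note: the two sizeof() offsets above imply the following RC atomic WQE layout, reconstructed here from that arithmetic for readability:

	+----------------------------------+ <- rc_sq_wqe
	| struct hns_roce_v2_rc_send_wqe   |  base header
	+----------------------------------+ <- dseg
	| struct hns_roce_v2_wqe_data_seg  |  one data segment
	+----------------------------------+ <- aseg
	| struct hns_roce_wqe_atomic_seg   |  swap/compare-add payload
	+----------------------------------+

The cmp_data pair at the end of this hunk appears to be a whitespace-only cleanup; both sides are otherwise identical.
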
@@ -176,13 +179,15 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
 
 static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
-			     void *wqe, unsigned int *sge_ind,
+			     unsigned int *sge_ind,
 			     unsigned int valid_num_sge)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
+	struct hns_roce_v2_wqe_data_seg *dseg =
+		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_qp *qp = to_hr_qp(ibqp);
+	void *wqe = dseg;
 	int j = 0;
 	int i;
 
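
Note: a local 'void *wqe = dseg;' survives inside set_rwqe_data_seg() because the rest of the function, outside this hunk, still advances a generic byte cursor on the inline path. A condensed sketch of that pattern, paraphrased from the surrounding function rather than quoted from this diff:

	/* Sketch, assuming the usual inline-send path: data from each SGE
	 * is copied straight into the WQE and the byte cursor advances,
	 * which is why a void pointer is kept alongside the typed dseg.
	 */
	for (i = 0; i < wr->num_sge; i++) {
		memcpy(wqe, (void *)(uintptr_t)wr->sg_list[i].addr,
		       wr->sg_list[i].length);
		wqe += wr->sg_list[i].length;
	}
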
@@ -438,7 +443,6 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
 	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
 		     owner_bit);
 
-	wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
 	switch (wr->opcode) {
 	case IB_WR_RDMA_READ:
 	case IB_WR_RDMA_WRITE:
@@ -451,7 +455,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
 		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
 		break;
 	case IB_WR_REG_MR:
-		set_frmr_seg(rc_sq_wqe, wqe, reg_wr(wr));
+		set_frmr_seg(rc_sq_wqe, reg_wr(wr));
 		break;
 	case IB_WR_ATOMIC_CMP_AND_SWP:
 	case IB_WR_ATOMIC_FETCH_AND_ADD:
@@ -468,10 +472,10 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
 
 	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
 	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
-		set_atomic_seg(wr, wqe, rc_sq_wqe, valid_num_sge);
+		set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
 	else if (wr->opcode != IB_WR_REG_MR)
 		ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
-					wqe, &curr_idx, valid_num_sge);
+					&curr_idx, valid_num_sge);
 
 	*sge_idx = curr_idx;
 
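
Note: with the cursor advance removed from set_rc_wqe(), the three updated helper signatures line up as follows (comments show the old forms; argument names abbreviated for readability):

	set_frmr_seg(rc_sq_wqe, reg_wr(wr));      /* was (rc_sq_wqe, wqe, wr) */
	set_atomic_seg(wr, rc_sq_wqe, nsge);      /* was (wr, wqe, rc_sq_wqe, nsge) */
	set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
			  &curr_idx, nsge);       /* 'wqe' argument dropped */

Each helper now recomputes its own segment offset from rc_sq_wqe, so the layout knowledge lives in one place per segment type instead of in the caller.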