@@ -67,30 +67,30 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
 	user_qp = &qp->user_qp;
 	req.sq_cqn_mtt_cfg = FIELD_PREP(
 		ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
-		ilog2(user_qp->sq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
+		ilog2(user_qp->sq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
 	req.sq_cqn_mtt_cfg |=
 		FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
 
 	req.rq_cqn_mtt_cfg = FIELD_PREP(
 		ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
-		ilog2(user_qp->rq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
+		ilog2(user_qp->rq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
 	req.rq_cqn_mtt_cfg |=
 		FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
 
-	req.sq_mtt_cfg = user_qp->sq_mtt.page_offset;
+	req.sq_mtt_cfg = user_qp->sq_mem.page_offset;
 	req.sq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
-				     user_qp->sq_mtt.mtt_nents) |
+				     user_qp->sq_mem.mtt_nents) |
 			  FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
-				     user_qp->sq_mtt.mtt_type);
+				     user_qp->sq_mem.mtt_type);
 
-	req.rq_mtt_cfg = user_qp->rq_mtt.page_offset;
+	req.rq_mtt_cfg = user_qp->rq_mem.page_offset;
 	req.rq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
-				     user_qp->rq_mtt.mtt_nents) |
+				     user_qp->rq_mem.mtt_nents) |
 			  FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
-				     user_qp->rq_mtt.mtt_type);
+				     user_qp->rq_mem.mtt_type);
 
-	req.sq_buf_addr = user_qp->sq_mtt.mtt_entry[0];
-	req.rq_buf_addr = user_qp->rq_mtt.mtt_entry[0];
+	req.sq_buf_addr = user_qp->sq_mem.mtt_entry[0];
+	req.rq_buf_addr = user_qp->rq_mem.mtt_entry[0];
 
 	req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr;
 	req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
@@ -161,7 +161,7 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
 {
 	struct erdma_dev *dev = to_edev(cq->ibcq.device);
 	struct erdma_cmdq_create_cq_req req;
-	struct erdma_mem *mtt;
+	struct erdma_mem *mem;
 	u32 page_size;
 
 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
@@ -186,23 +186,23 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
 		req.cq_db_info_addr =
 			cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
 	} else {
-		mtt = &cq->user_cq.qbuf_mtt;
+		mem = &cq->user_cq.qbuf_mem;
 		req.cfg0 |=
 			FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
-				   ilog2(mtt->page_size) - ERDMA_HW_PAGE_SHIFT);
-		if (mtt->mtt_nents == 1) {
-			req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
-			req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
+				   ilog2(mem->page_size) - ERDMA_HW_PAGE_SHIFT);
+		if (mem->mtt_nents == 1) {
+			req.qbuf_addr_l = lower_32_bits(*(u64 *)mem->mtt_buf);
+			req.qbuf_addr_h = upper_32_bits(*(u64 *)mem->mtt_buf);
 		} else {
-			req.qbuf_addr_l = lower_32_bits(mtt->mtt_entry[0]);
-			req.qbuf_addr_h = upper_32_bits(mtt->mtt_entry[0]);
+			req.qbuf_addr_l = lower_32_bits(mem->mtt_entry[0]);
+			req.qbuf_addr_h = upper_32_bits(mem->mtt_entry[0]);
 		}
 		req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK,
-				       mtt->mtt_nents);
+				       mem->mtt_nents);
 		req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
-				       mtt->mtt_type);
+				       mem->mtt_type);
 
-		req.first_page_offset = mtt->page_offset;
+		req.first_page_offset = mem->page_offset;
 		req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
 
 		if (uctx->ext_db.enable) {
@@ -660,7 +660,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
 			       qp->attrs.rq_size * RQE_SIZE))
 		return -EINVAL;
 
-	ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mtt, va,
+	ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mem, va,
 			      qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
 			      (SZ_1M - SZ_4K), 1);
 	if (ret)
@@ -669,7 +669,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
 	rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
 	qp->user_qp.rq_offset = rq_offset;
 
-	ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,
+	ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mem, va + rq_offset,
 			      qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset,
 			      (SZ_1M - SZ_4K), 1);
 	if (ret)
@@ -687,18 +687,18 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
 	return 0;
 
 put_rq_mtt:
-	put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
+	put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
 
 put_sq_mtt:
-	put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
+	put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
 
 	return ret;
 }
 
 static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx)
 {
-	put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
-	put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
+	put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
+	put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
 	erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page);
 }
 
@@ -1041,7 +1041,7 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 				  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
 	} else {
 		erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
-		put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
 	}
 
 	xa_erase(&dev->cq_xa, cq->cqn);
@@ -1089,8 +1089,8 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 				  WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
 				  qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
 	} else {
-		put_mtt_entries(dev, &qp->user_qp.sq_mtt);
-		put_mtt_entries(dev, &qp->user_qp.rq_mtt);
+		put_mtt_entries(dev, &qp->user_qp.sq_mem);
+		put_mtt_entries(dev, &qp->user_qp.rq_mem);
 		erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page);
 	}
 
@@ -1379,7 +1379,7 @@ static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
 	int ret;
 	struct erdma_dev *dev = to_edev(cq->ibcq.device);
 
-	ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mtt, ureq->qbuf_va,
+	ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mem, ureq->qbuf_va,
 			      ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
 			      1);
 	if (ret)
@@ -1389,7 +1389,7 @@ static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
 					  &cq->user_cq.user_dbr_page,
 					  &cq->user_cq.db_info_dma_addr);
 	if (ret)
-		put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
 
 	return ret;
 }
@@ -1473,7 +1473,7 @@ int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 err_free_res:
 	if (!rdma_is_kernel_res(&ibcq->res)) {
 		erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
-		put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
 	} else {
 		dma_free_coherent(&dev->pdev->dev,
 				  WARPPED_BUFSIZE(depth << CQE_SHIFT),
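For context on the rename: every field touched above (page_size, page_offset, mtt_nents, mtt_type, mtt_buf, mtt_entry[0]) belongs to struct erdma_mem, which is why members of type struct erdma_mem are now named *_mem instead of *_mtt. Below is a minimal sketch of the assumed shape of that structure, reconstructed only from the fields this diff references; the exact field types, ordering, the inline-entry count, and any fields not used here are assumptions, not taken from the driver headers.

```c
#include <linux/types.h>

/* Assumed inline-entry bound; the real driver defines its own constant. */
#define ERDMA_SKETCH_MAX_INLINE_MTT_ENTRIES 4

/*
 * Hypothetical sketch of struct erdma_mem, limited to the fields the
 * patch above actually reads. Types are guesses based on usage.
 */
struct erdma_mem_sketch {
	void *mtt_buf;		/* MTT buffer; dereferenced when mtt_nents == 1 */
	u32 mtt_type;		/* written into the *_MTT_TYPE_MASK field of requests */
	u32 page_size;		/* ilog2(page_size) - ERDMA_HW_PAGE_SHIFT goes to HW */
	u32 page_offset;	/* offset of the buffer within its first page */
	u32 mtt_nents;		/* number of MTT entries in use */
	/* mtt_entry[0] is used as the buffer address when entries are inline */
	u64 mtt_entry[ERDMA_SKETCH_MAX_INLINE_MTT_ENTRIES];
};
```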