Skip to content

Commit 24ce947

Browse files
selvintxavier and jgunthorpe
authored and committed
RDMA/bnxt_re: Use the common mmap helper functions
Replace the mmap handling function with common code in IB core. Create rdma_user_mmap_entry for each mmap resource and add to the ib_core mmap list. Add mmap_free verb support. Also, use rdma_user_mmap_io while mapping Doorbell pages. Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Selvin Xavier <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent 147394d commit 24ce947

File tree

4 files changed

+119
-24
lines changed

4 files changed

+119
-24
lines changed

drivers/infiniband/hw/bnxt_re/ib_verbs.c

Lines changed: 102 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -533,12 +533,55 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
533533
return rc;
534534
}
535535

536+
/*
 * bnxt_re_mmap_entry_insert - allocate and register a user mmap entry
 * @uctx:       user context the mapping belongs to
 * @mem_offset: backing resource recorded in the entry (for UC_DB this is the
 *              doorbell address later turned into a pfn in bnxt_re_mmap)
 * @mmap_flag:  type of mapping (BNXT_RE_MMAP_SH_PAGE or BNXT_RE_MMAP_UC_DB)
 * @offset:     optional out-param; on success receives the offset userspace
 *              must pass to mmap() for this entry
 *
 * Creates a driver-private rdma_user_mmap_entry and inserts it into the
 * ib_core mmap list.  The shared page is inserted at the fixed offset 0
 * (insert_exact) so legacy userspace that mmaps pgoff 0 keeps working;
 * doorbell pages get a core-assigned offset.
 *
 * Returns the new entry, or NULL on allocation/insert failure.  The entry is
 * freed by the ->mmap_free() callback (bnxt_re_mmap_free) once it has been
 * removed and its last reference dropped.
 */
static struct bnxt_re_user_mmap_entry*
bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
{
	struct bnxt_re_user_mmap_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->mem_offset = mem_offset;
	entry->mmap_flag = mmap_flag;

	switch (mmap_flag) {
	case BNXT_RE_MMAP_SH_PAGE:
		/* Shared page must live at mmap offset 0 (legacy ABI). */
		ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
							&entry->rdma_entry, PAGE_SIZE, 0);
		break;
	case BNXT_RE_MMAP_UC_DB:
		/* Doorbell page: let the core pick a free offset. */
		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
						  &entry->rdma_entry, PAGE_SIZE);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret) {
		/* Insert failed: entry never became visible, free it here. */
		kfree(entry);
		return NULL;
	}
	if (offset)
		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return entry;
}
573+
536574
/* Protection Domains */
537575
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
538576
{
539577
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
540578
struct bnxt_re_dev *rdev = pd->rdev;
541579

580+
if (udata) {
581+
rdma_user_mmap_entry_remove(pd->pd_db_mmap);
582+
pd->pd_db_mmap = NULL;
583+
}
584+
542585
bnxt_re_destroy_fence_mr(pd);
543586

544587
if (pd->qplib_pd.id) {
@@ -557,7 +600,8 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
557600
struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
558601
udata, struct bnxt_re_ucontext, ib_uctx);
559602
struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
560-
int rc;
603+
struct bnxt_re_user_mmap_entry *entry = NULL;
604+
int rc = 0;
561605

562606
pd->rdev = rdev;
563607
if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
@@ -567,7 +611,7 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
567611
}
568612

569613
if (udata) {
570-
struct bnxt_re_pd_resp resp;
614+
struct bnxt_re_pd_resp resp = {};
571615

572616
if (!ucntx->dpi.dbr) {
573617
/* Allocate DPI in alloc_pd to avoid failing of
@@ -584,12 +628,21 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
584628
resp.pdid = pd->qplib_pd.id;
585629
/* Still allow mapping this DBR to the new user PD. */
586630
resp.dpi = ucntx->dpi.dpi;
587-
resp.dbr = (u64)ucntx->dpi.umdbr;
588631

589-
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
632+
entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
633+
BNXT_RE_MMAP_UC_DB, &resp.dbr);
634+
635+
if (!entry) {
636+
rc = -ENOMEM;
637+
goto dbfail;
638+
}
639+
640+
pd->pd_db_mmap = &entry->rdma_entry;
641+
642+
rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
590643
if (rc) {
591-
ibdev_err(&rdev->ibdev,
592-
"Failed to copy user response\n");
644+
rdma_user_mmap_entry_remove(pd->pd_db_mmap);
645+
rc = -EFAULT;
593646
goto dbfail;
594647
}
595648
}
@@ -3964,6 +4017,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
39644017
container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
39654018
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
39664019
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
4020+
struct bnxt_re_user_mmap_entry *entry;
39674021
struct bnxt_re_uctx_resp resp = {};
39684022
u32 chip_met_rev_num = 0;
39694023
int rc;
@@ -4002,6 +4056,13 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
40024056
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
40034057
resp.mode = rdev->chip_ctx->modes.wqe_mode;
40044058

4059+
entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
4060+
if (!entry) {
4061+
rc = -ENOMEM;
4062+
goto cfail;
4063+
}
4064+
uctx->shpage_mmap = &entry->rdma_entry;
4065+
40054066
rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
40064067
if (rc) {
40074068
ibdev_err(ibdev, "Failed to copy user context");
@@ -4025,6 +4086,8 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
40254086

40264087
struct bnxt_re_dev *rdev = uctx->rdev;
40274088

4089+
rdma_user_mmap_entry_remove(uctx->shpage_mmap);
4090+
uctx->shpage_mmap = NULL;
40284091
if (uctx->shpg)
40294092
free_page((unsigned long)uctx->shpg);
40304093

/*
 * bnxt_re_mmap - ->mmap verb; map a previously registered entry to userspace
 * @ib_uctx: user context issuing the mmap
 * @vma:     vma describing the requested user mapping
 *
 * Looks up the rdma_user_mmap_entry matching vma->vm_pgoff (the lookup also
 * validates the requested length against the entry size) and dispatches on
 * the driver-private mapping type:
 *  - UC_DB:   I/O-map the doorbell page, noncached, via rdma_user_mmap_io()
 *             so the core can zap/track it.
 *  - SH_PAGE: insert the kernel-allocated shared page (uctx->shpg).
 *
 * Returns 0 on success or a negative errno (-EINVAL for an unknown offset
 * or mapping type, otherwise the mapping helper's error).
 */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_user_mmap_entry *bnxt_entry;
	struct rdma_user_mmap_entry *rdma_entry;
	int ret = 0;
	u64 pfn;

	/* Takes a reference on the entry; dropped below. */
	rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
	if (!rdma_entry)
		return -EINVAL;

	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
				  rdma_entry);

	switch (bnxt_entry->mmap_flag) {
	case BNXT_RE_MMAP_UC_DB:
		/* mem_offset holds the doorbell bus address (umdbr). */
		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	case BNXT_RE_MMAP_SH_PAGE:
		ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
		break;
	default:
		ret = -EINVAL;
		break;
	}

	rdma_user_mmap_entry_put(rdma_entry);
	return ret;
}
4140+
4141+
/*
 * bnxt_re_mmap_free - ->mmap_free verb; release a driver mmap entry
 * @rdma_entry: core entry embedded in a struct bnxt_re_user_mmap_entry
 *
 * Called by ib_core once the entry has been removed from the mmap list and
 * its last reference has been dropped; frees the containing driver entry
 * allocated in bnxt_re_mmap_entry_insert().
 */
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct bnxt_re_user_mmap_entry *bnxt_entry;

	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
				  rdma_entry);

	kfree(bnxt_entry);
}

drivers/infiniband/hw/bnxt_re/ib_verbs.h

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@ struct bnxt_re_pd {
6060
struct bnxt_re_dev *rdev;
6161
struct bnxt_qplib_pd qplib_pd;
6262
struct bnxt_re_fence_data fence;
63+
struct rdma_user_mmap_entry *pd_db_mmap;
6364
};
6465

6566
struct bnxt_re_ah {
@@ -136,6 +137,18 @@ struct bnxt_re_ucontext {
136137
struct bnxt_qplib_dpi dpi;
137138
void *shpg;
138139
spinlock_t sh_lock; /* protect shpg */
140+
struct rdma_user_mmap_entry *shpage_mmap;
141+
};
142+
143+
/* Types of per-ucontext user mappings handled by bnxt_re_mmap(). */
enum bnxt_re_mmap_flag {
	BNXT_RE_MMAP_SH_PAGE,	/* kernel shared page, fixed mmap offset 0 */
	BNXT_RE_MMAP_UC_DB,	/* uncached doorbell page */
};

/* Driver wrapper around the ib_core mmap entry; freed in bnxt_re_mmap_free(). */
struct bnxt_re_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;	/* embedded core entry */
	u64 mem_offset;		/* backing address (doorbell) for UC_DB; 0 for SH_PAGE */
	u8 mmap_flag;		/* enum bnxt_re_mmap_flag value */
};
140153

141154
static inline u16 bnxt_re_get_swqe_size(int nsge)
@@ -213,6 +226,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
213226
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
214227
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
215228
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
229+
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
230+
216231

217232
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
218233
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);

drivers/infiniband/hw/bnxt_re/main.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -545,6 +545,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
545545
.get_port_immutable = bnxt_re_get_port_immutable,
546546
.map_mr_sg = bnxt_re_map_mr_sg,
547547
.mmap = bnxt_re_mmap,
548+
.mmap_free = bnxt_re_mmap_free,
548549
.modify_qp = bnxt_re_modify_qp,
549550
.modify_srq = bnxt_re_modify_srq,
550551
.poll_cq = bnxt_re_poll_cq,

drivers/infiniband/hw/bnxt_re/qplib_res.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -813,7 +813,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
813813
return 0;
814814

815815
unmap_io:
816-
pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
816+
iounmap(dpit->dbr_bar_reg_iomem);
817817
dpit->dbr_bar_reg_iomem = NULL;
818818
return -ENOMEM;
819819
}

0 commit comments

Comments
 (0)