@@ -533,12 +533,55 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
 	return rc;
 }
 
+static struct bnxt_re_user_mmap_entry*
+bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
+			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
+{
+	struct bnxt_re_user_mmap_entry *entry;
+	int ret;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return NULL;
+
+	entry->mem_offset = mem_offset;
+	entry->mmap_flag = mmap_flag;
+
+	switch (mmap_flag) {
+	case BNXT_RE_MMAP_SH_PAGE:
+		ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
+							&entry->rdma_entry, PAGE_SIZE, 0);
+		break;
+	case BNXT_RE_MMAP_UC_DB:
+		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
+						  &entry->rdma_entry, PAGE_SIZE);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret) {
+		kfree(entry);
+		return NULL;
+	}
+	if (offset)
+		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+	return entry;
+}
+
 /* Protection Domains */
 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
 {
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
 
+	if (udata) {
+		rdma_user_mmap_entry_remove(pd->pd_db_mmap);
+		pd->pd_db_mmap = NULL;
+	}
+
 	bnxt_re_destroy_fence_mr(pd);
 
 	if (pd->qplib_pd.id) {
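
Note: the helper above relies on driver-private definitions introduced elsewhere in this series (in the bnxt_re headers) and not shown in this hunk. A sketch of the assumed shape, based only on the fields used here:

	/* Assumed definitions (sketch, not part of this hunk): one wrapper per
	 * mmap'able resource, embedding the core's rdma_user_mmap_entry. */
	enum bnxt_re_mmap_flag {
		BNXT_RE_MMAP_SH_PAGE = 0,
		BNXT_RE_MMAP_UC_DB,
	};

	struct bnxt_re_user_mmap_entry {
		struct rdma_user_mmap_entry rdma_entry;
		u64 mem_offset;
		u8 mmap_flag;
	};

The series is also assumed to add a struct rdma_user_mmap_entry *pd_db_mmap field to struct bnxt_re_pd and a struct rdma_user_mmap_entry *shpage_mmap field to struct bnxt_re_ucontext, which bnxt_re_alloc_pd() and bnxt_re_alloc_ucontext() populate below.
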
@@ -557,7 +600,8 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
 		udata, struct bnxt_re_ucontext, ib_uctx);
 	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
-	int rc;
+	struct bnxt_re_user_mmap_entry *entry = NULL;
+	int rc = 0;
 
 	pd->rdev = rdev;
 	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
@@ -567,7 +611,7 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	}
 
 	if (udata) {
-		struct bnxt_re_pd_resp resp;
+		struct bnxt_re_pd_resp resp = {};
 
 		if (!ucntx->dpi.dbr) {
 			/* Allocate DPI in alloc_pd to avoid failing of
@@ -584,12 +628,21 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 		resp.pdid = pd->qplib_pd.id;
 		/* Still allow mapping this DBR to the new user PD. */
 		resp.dpi = ucntx->dpi.dpi;
-		resp.dbr = (u64)ucntx->dpi.umdbr;
 
-		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+		entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
+						  BNXT_RE_MMAP_UC_DB, &resp.dbr);
+
+		if (!entry) {
+			rc = -ENOMEM;
+			goto dbfail;
+		}
+
+		pd->pd_db_mmap = &entry->rdma_entry;
+
+		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
 		if (rc) {
-			ibdev_err(&rdev->ibdev,
-				  "Failed to copy user response\n");
+			rdma_user_mmap_entry_remove(pd->pd_db_mmap);
+			rc = -EFAULT;
 			goto dbfail;
 		}
 	}
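
With this change resp.dbr no longer carries the raw doorbell bus address; it carries the opaque offset produced by rdma_user_mmap_get_offset(), which userspace passes back as the mmap offset on the uverbs device fd. A minimal userspace-side sketch of how a provider might consume it (names such as cmd_fd and map_uc_doorbell are hypothetical, not taken from rdma-core):

	#include <stdint.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* Sketch only: map the uncached doorbell page advertised in resp.dbr.
	 * cmd_fd is the uverbs char-device fd backing the ibv_context. */
	static void *map_uc_doorbell(int cmd_fd, uint64_t dbr_offset)
	{
		void *db;

		/* The kernel resolves this offset via rdma_user_mmap_entry_get()
		 * and serves it from the BNXT_RE_MMAP_UC_DB case in bnxt_re_mmap(). */
		db = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ | PROT_WRITE,
			  MAP_SHARED, cmd_fd, (off_t)dbr_offset);
		return db == MAP_FAILED ? NULL : db;
	}
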
@@ -3964,6 +4017,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+	struct bnxt_re_user_mmap_entry *entry;
 	struct bnxt_re_uctx_resp resp = {};
 	u32 chip_met_rev_num = 0;
 	int rc;
@@ -4002,6 +4056,13 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 	resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
 	resp.mode = rdev->chip_ctx->modes.wqe_mode;
 
+	entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
+	if (!entry) {
+		rc = -ENOMEM;
+		goto cfail;
+	}
+	uctx->shpage_mmap = &entry->rdma_entry;
+
 	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
 	if (rc) {
 		ibdev_err(ibdev, "Failed to copy user context");
@@ -4025,6 +4086,8 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 
 	struct bnxt_re_dev *rdev = uctx->rdev;
 
+	rdma_user_mmap_entry_remove(uctx->shpage_mmap);
+	uctx->shpage_mmap = NULL;
 	if (uctx->shpg)
 		free_page((unsigned long)uctx->shpg);
 
@@ -4044,27 +4107,43 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
 						     struct bnxt_re_ucontext,
 						     ib_uctx);
-	struct bnxt_re_dev *rdev = uctx->rdev;
+	struct bnxt_re_user_mmap_entry *bnxt_entry;
+	struct rdma_user_mmap_entry *rdma_entry;
+	int ret = 0;
 	u64 pfn;
 
-	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+	rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
+	if (!rdma_entry)
 		return -EINVAL;
 
-	if (vma->vm_pgoff) {
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-				       PAGE_SIZE, vma->vm_page_prot)) {
-			ibdev_err(&rdev->ibdev, "Failed to map DPI");
-			return -EAGAIN;
-		}
-	} else {
-		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
-		if (remap_pfn_range(vma, vma->vm_start,
-				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
-			ibdev_err(&rdev->ibdev, "Failed to map shared page");
-			return -EAGAIN;
-		}
+	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
+				  rdma_entry);
+
+	switch (bnxt_entry->mmap_flag) {
+	case BNXT_RE_MMAP_UC_DB:
+		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+					pgprot_noncached(vma->vm_page_prot),
+					rdma_entry);
+		break;
+	case BNXT_RE_MMAP_SH_PAGE:
+		ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
+		break;
+	default:
+		ret = -EINVAL;
+		break;
 	}
 
-	return 0;
+	rdma_user_mmap_entry_put(rdma_entry);
+	return ret;
+}
+
+void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+	struct bnxt_re_user_mmap_entry *bnxt_entry;
+
+	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
+				  rdma_entry);
+
+	kfree(bnxt_entry);
 }
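
Since mmap entries are now reference-counted by ib_core and released through the new callback, bnxt_re_mmap_free() also has to be registered in the driver's ib_device_ops (done in bnxt_re's main.c elsewhere in this series, not shown in this hunk). A sketch of the expected wiring:

	/* Sketch (not part of this hunk): register the free hook next to the
	 * existing .mmap so ib_core invokes it once the last reference to an
	 * rdma_user_mmap_entry is dropped. */
	static const struct ib_device_ops bnxt_re_dev_ops = {
		/* ... existing ops ... */
		.mmap = bnxt_re_mmap,
		.mmap_free = bnxt_re_mmap_free,
	};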