@@ -3974,16 +3974,13 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
 	return rc;
 }
 
-/* uverbs */
-struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
-				  u64 virt_addr, int mr_access_flags,
-				  struct ib_udata *udata)
+static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
+					   int mr_access_flags, struct ib_umem *umem)
 {
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
-	struct bnxt_re_mr *mr;
-	struct ib_umem *umem;
 	unsigned long page_size;
+	struct bnxt_re_mr *mr;
 	int umem_pgs, rc;
 	u32 active_mrs;
 
@@ -3993,6 +3990,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 		return ERR_PTR(-ENOMEM);
 	}
 
+	page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
+	if (!page_size) {
+		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
+		return ERR_PTR(-EINVAL);
+	}
+
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
@@ -4004,36 +4007,23 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 
 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
 	if (rc) {
-		ibdev_err(&rdev->ibdev, "Failed to allocate MR");
+		ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
+		rc = -EIO;
 		goto free_mr;
 	}
 	/* The fixed portion of the rkey is the same as the lkey */
 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
-
-	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
-	if (IS_ERR(umem)) {
-		ibdev_err(&rdev->ibdev, "Failed to get umem");
-		rc = -EFAULT;
-		goto free_mrw;
-	}
 	mr->ib_umem = umem;
-
 	mr->qplib_mr.va = virt_addr;
-	page_size = ib_umem_find_best_pgsz(
-		umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
-	if (!page_size) {
-		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
-		rc = -EFAULT;
-		goto free_umem;
-	}
 	mr->qplib_mr.total_size = length;
 
 	umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
 			       umem_pgs, page_size);
 	if (rc) {
-		ibdev_err(&rdev->ibdev, "Failed to register user MR");
-		goto free_umem;
+		ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
+		rc = -EIO;
+		goto free_mrw;
 	}
 
 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
@@ -4043,15 +4033,56 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	rdev->stats.res.mr_watermark = active_mrs;
 
 	return &mr->ib_mr;
-free_umem:
-	ib_umem_release(umem);
+
 free_mrw:
 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
 free_mr:
 	kfree(mr);
 	return ERR_PTR(rc);
 }
 
+struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
+				  u64 virt_addr, int mr_access_flags,
+				  struct ib_udata *udata)
+{
+	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	struct bnxt_re_dev *rdev = pd->rdev;
+	struct ib_umem *umem;
+	struct ib_mr *ib_mr;
+
+	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
+	if (IS_ERR(umem))
+		return ERR_CAST(umem);
+
+	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+	if (IS_ERR(ib_mr))
+		ib_umem_release(umem);
+	return ib_mr;
+}
+
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+					 u64 length, u64 virt_addr, int fd,
+					 int mr_access_flags, struct ib_udata *udata)
+{
+	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	struct bnxt_re_dev *rdev = pd->rdev;
+	struct ib_umem_dmabuf *umem_dmabuf;
+	struct ib_umem *umem;
+	struct ib_mr *ib_mr;
+
+	umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
+						fd, mr_access_flags);
+	if (IS_ERR(umem_dmabuf))
+		return ERR_CAST(umem_dmabuf);
+
+	umem = &umem_dmabuf->umem;
+
+	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+	if (IS_ERR(ib_mr))
+		ib_umem_release(umem);
+	return ib_mr;
+}
+
 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 {
 	struct ib_device *ibdev = ctx->device;
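For reference, the new bnxt_re_reg_user_mr_dmabuf() handler is reached through the core reg_user_mr_dmabuf device op, which applications invoke with ibv_reg_dmabuf_mr() from rdma-core. A minimal usage sketch follows; the PD, the dma-buf fd, the helper name and the access flags chosen here are illustrative assumptions, not part of this change:

#include <infiniband/verbs.h>

/* Sketch only: register a dma-buf backed MR. ibv_reg_dmabuf_mr() dispatches
 * to the driver's reg_user_mr_dmabuf op, i.e. bnxt_re_reg_user_mr_dmabuf()
 * on this device. 'pd' and 'dmabuf_fd' are assumed to be set up already. */
static struct ibv_mr *reg_dmabuf_example(struct ibv_pd *pd, int dmabuf_fd,
                                         size_t length, uint64_t iova)
{
        int access = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ |
                     IBV_ACCESS_REMOTE_WRITE;

        /* offset 0 within the dma-buf; 'iova' becomes virt_addr in the
         * kernel handler, i.e. the address the HCA will use for this MR. */
        return ibv_reg_dmabuf_mr(pd, 0, length, iova, dmabuf_fd, access);
}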