@@ -870,6 +870,139 @@ static const struct attribute_group bnxt_re_dev_attr_group = {
 	.attrs = bnxt_re_attributes,
 };
 
+static int bnxt_re_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+	struct bnxt_qplib_hwq *mr_hwq;
+	struct nlattr *table_attr;
+	struct bnxt_re_mr *mr;
+
+	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+	if (!table_attr)
+		return -EMSGSIZE;
+
+	mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
+	mr_hwq = &mr->qplib_mr.hwq;
+
+	if (rdma_nl_put_driver_u32(msg, "page_size",
+				   mr_hwq->qe_ppg * mr_hwq->element_size))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "max_elements", mr_hwq->max_elements))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "element_size", mr_hwq->element_size))
+		goto err;
+	if (rdma_nl_put_driver_u64_hex(msg, "hwq", (unsigned long)mr_hwq))
+		goto err;
+	if (rdma_nl_put_driver_u64_hex(msg, "va", mr->qplib_mr.va))
+		goto err;
+
+	nla_nest_end(msg, table_attr);
+	return 0;
+
+err:
+	nla_nest_cancel(msg, table_attr);
+	return -EMSGSIZE;
+}
+
+static int bnxt_re_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
+{
+	struct bnxt_qplib_hwq *cq_hwq;
+	struct nlattr *table_attr;
+	struct bnxt_re_cq *cq;
+
+	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
+	cq_hwq = &cq->qplib_cq.hwq;
+
+	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+	if (!table_attr)
+		return -EMSGSIZE;
+
+	if (rdma_nl_put_driver_u32(msg, "cq_depth", cq_hwq->depth))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "max_elements", cq_hwq->max_elements))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "element_size", cq_hwq->element_size))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "max_wqe", cq->qplib_cq.max_wqe))
+		goto err;
+
+	nla_nest_end(msg, table_attr);
+	return 0;
+
+err:
+	nla_nest_cancel(msg, table_attr);
+	return -EMSGSIZE;
+}
+
+static int bnxt_re_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
+{
+	struct bnxt_qplib_qp *qplib_qp;
+	struct nlattr *table_attr;
+	struct bnxt_re_qp *qp;
+
+	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+	if (!table_attr)
+		return -EMSGSIZE;
+
+	qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+	qplib_qp = &qp->qplib_qp;
+
+	if (rdma_nl_put_driver_u32(msg, "sq_max_wqe", qplib_qp->sq.max_wqe))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "sq_max_sge", qplib_qp->sq.max_sge))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "sq_wqe_size", qplib_qp->sq.wqe_size))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "sq_swq_start", qplib_qp->sq.swq_start))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "sq_swq_last", qplib_qp->sq.swq_last))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "rq_max_wqe", qplib_qp->rq.max_wqe))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "rq_max_sge", qplib_qp->rq.max_sge))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "rq_wqe_size", qplib_qp->rq.wqe_size))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "rq_swq_start", qplib_qp->rq.swq_start))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "rq_swq_last", qplib_qp->rq.swq_last))
+		goto err;
+	if (rdma_nl_put_driver_u32(msg, "timeout", qplib_qp->timeout))
+		goto err;
+
+	nla_nest_end(msg, table_attr);
+	return 0;
+
+err:
+	nla_nest_cancel(msg, table_attr);
+	return -EMSGSIZE;
+}
+
+static int bnxt_re_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
+{
+	struct nlattr *table_attr;
+	struct bnxt_re_srq *srq;
+
+	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+	if (!table_attr)
+		return -EMSGSIZE;
+
+	srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+
+	if (rdma_nl_put_driver_u32_hex(msg, "wqe_size", srq->qplib_srq.wqe_size))
+		goto err;
+	if (rdma_nl_put_driver_u32_hex(msg, "max_wqe", srq->qplib_srq.max_wqe))
+		goto err;
+	if (rdma_nl_put_driver_u32_hex(msg, "max_sge", srq->qplib_srq.max_sge))
+		goto err;
+
+	nla_nest_end(msg, table_attr);
+	return 0;
+
+err:
+	nla_nest_cancel(msg, table_attr);
+	return -EMSGSIZE;
+}
+
 static const struct ib_device_ops bnxt_re_dev_ops = {
 	.owner = THIS_MODULE,
 	.driver_id = RDMA_DRIVER_BNXT_RE,
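
All four fill callbacks above share one netlink pattern: open an RDMA_NLDEV_ATTR_DRIVER nest, emit driver-specific key/value attributes, close the nest on success, and cancel it on overflow so a partially written nest never reaches userspace. A minimal sketch of that template, using the same kernel helpers as the patch (the "foo" object and its field are hypothetical, purely for illustration):

/*
 * Minimal sketch of the fill-callback template used above. The
 * "bnxt_re_foo" object and its "depth" field are hypothetical;
 * the netlink helpers are the same ones the patch uses.
 */
struct bnxt_re_foo {
	u32 depth;
};

static int bnxt_re_fill_res_foo_entry(struct sk_buff *msg,
				      struct bnxt_re_foo *foo)
{
	struct nlattr *table_attr;

	/* Open the driver-private attribute nest. */
	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	/* Each helper returns non-zero once the skb runs out of room. */
	if (rdma_nl_put_driver_u32(msg, "foo_depth", foo->depth))
		goto err;

	/* Commit the nest so userspace sees a complete attribute set. */
	nla_nest_end(msg, table_attr);
	return 0;

err:
	/* Roll the partially written nest back out of the message. */
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}
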
@@ -928,6 +1061,13 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
 };
 
+static const struct ib_device_ops restrack_ops = {
+	.fill_res_cq_entry = bnxt_re_fill_res_cq_entry,
+	.fill_res_qp_entry = bnxt_re_fill_res_qp_entry,
+	.fill_res_mr_entry = bnxt_re_fill_res_mr_entry,
+	.fill_res_srq_entry = bnxt_re_fill_res_srq_entry,
+};
+
 static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
 {
 	struct ib_device *ibdev = &rdev->ibdev;
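
Keeping the restrack callbacks in their own ib_device_ops table works because ib_set_device_ops() copies only the non-NULL members of the table it is given, so it can be called once per table, as the final hunk shows. That also makes it easy to gate the restrack table on some condition; a hedged sketch (the "restrack_supported" flag is hypothetical, not a field of struct bnxt_re_dev):

	/* Sketch only: ib_set_device_ops() merges non-NULL members, so a
	 * second call adds callbacks without clobbering earlier ones.
	 */
	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	if (rdev->restrack_supported)
		ib_set_device_ops(ibdev, &restrack_ops);
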
@@ -949,6 +1089,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
 	ibdev->driver_def = bnxt_re_uapi_defs;
 
 	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
+	ib_set_device_ops(ibdev, &restrack_ops);
 	ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
 	if (ret)
 		return ret;
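
For context, these callbacks are invoked from the nldev restrack core when userspace dumps a resource with driver details (e.g. via rdmatool's driver-details flag, `rdma res show qp -dd`): the core emits the generic attributes for the object and then hands the skb to the driver op registered above. A simplified sketch of that core-side dispatch, not the verbatim nldev code:

/* Simplified sketch of the core-side dispatch (not verbatim nldev code). */
static int fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *mr)
{
	struct ib_device *dev = mr->device;

	/* ... generic MR attributes are emitted here ... */

	/* Let the driver append its private attributes, if it registered
	 * a fill op (bnxt_re_fill_res_mr_entry() in this patch).
	 */
	if (dev->ops.fill_res_mr_entry)
		return dev->ops.fill_res_mr_entry(msg, mr);
	return 0;
}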