@@ -9,17 +9,22 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		      struct ib_udata *udata)
 {
 	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+	struct mana_ib_create_cq_resp resp = {};
+	struct mana_ib_ucontext *mana_ucontext;
 	struct ib_device *ibdev = ibcq->device;
 	struct mana_ib_create_cq ucmd = {};
 	struct mana_ib_dev *mdev;
+	bool is_rnic_cq;
+	u32 doorbell;
 	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
 
-	if (udata->inlen < sizeof(ucmd))
-		return -EINVAL;
-
 	cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
+	cq->cq_handle = INVALID_MANA_HANDLE;
+
+	if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
+		return -EINVAL;
 
 	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
 	if (err) {
@@ -28,7 +33,9 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		return err;
 	}
 
-	if (attr->cqe > mdev->adapter_caps.max_qp_wr) {
+	is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);
+
+	if (!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) {
 		ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
 		return -EINVAL;
 	}
@@ -40,7 +47,41 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		return err;
 	}
 
+	mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
+						  ibucontext);
+	doorbell = mana_ucontext->doorbell;
+
+	if (is_rnic_cq) {
+		err = mana_ib_gd_create_cq(mdev, cq, doorbell);
+		if (err) {
+			ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
+			goto err_destroy_queue;
+		}
+
+		err = mana_ib_install_cq_cb(mdev, cq);
+		if (err) {
+			ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
+			goto err_destroy_rnic_cq;
+		}
+	}
+
+	resp.cqid = cq->queue.id;
+	err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
+	if (err) {
+		ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
+		goto err_remove_cq_cb;
+	}
+
 	return 0;
+
+err_remove_cq_cb:
+	mana_ib_remove_cq_cb(mdev, cq);
+err_destroy_rnic_cq:
+	mana_ib_gd_destroy_cq(mdev, cq);
+err_destroy_queue:
+	mana_ib_destroy_queue(mdev, &cq->queue);
+
+	return err;
 }
 
 int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
@@ -52,6 +93,12 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
 
 	mana_ib_remove_cq_cb(mdev, cq);
+
+	/* Ignore return code as there is not much we can do about it.
+	 * The error message is printed inside.
+	 */
+	mana_ib_gd_destroy_cq(mdev, cq);
+
 	mana_ib_destroy_queue(mdev, &cq->queue);
 
 	return 0;
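
A note on the uapi contract this diff depends on (the same change extends include/uapi/rdma/mana-abi.h). The inlen check moves from sizeof(ucmd) to offsetof(struct mana_ib_create_cq, flags) so that older userspace, whose command struct ends where the new flags field begins, still passes validation; since ucmd is zero-initialized, such callers end up with flags == 0 and take the original non-RNIC path. A sketch of the assumed uapi layout follows; field names and widths should be verified against the tree you build against:

/* Flag tested via ucmd.flags in mana_ib_create_cq() */
enum mana_ib_create_cq_flags {
	MANA_IB_CREATE_RNIC_CQ = 1 << 0,
};

/* Command struct: old userspace ends right before flags, so
 * inlen >= offsetof(struct mana_ib_create_cq, flags) still holds.
 */
struct mana_ib_create_cq {
	__aligned_u64 buf_addr;
	__u16 flags;
	__u16 reserved0;
	__u32 reserved1;
};

/* Response struct: cqid is copied back with
 * min(sizeof(resp), udata->outlen), so a smaller response buffer
 * from older userspace is tolerated as well.
 */
struct mana_ib_create_cq_resp {
	__u32 cqid;
	__u32 reserved;
};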