@@ -37,6 +37,8 @@
 #define NVMET_RDMA_MAX_MDTS		8
 #define NVMET_RDMA_MAX_METADATA_MDTS	5
 
+#define NVMET_RDMA_BACKLOG	128
+
 struct nvmet_rdma_srq;
 
 struct nvmet_rdma_cmd {
@@ -1583,8 +1585,19 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 	}
 
 	if (queue->host_qid == 0) {
-		/* Let inflight controller teardown complete */
-		flush_workqueue(nvmet_wq);
+		struct nvmet_rdma_queue *q;
+		int pending = 0;
+
+		/* Check for pending controller teardown */
+		mutex_lock(&nvmet_rdma_queue_mutex);
+		list_for_each_entry(q, &nvmet_rdma_queue_list, queue_list) {
+			if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl &&
+			    q->state == NVMET_RDMA_Q_DISCONNECTING)
+				pending++;
+		}
+		mutex_unlock(&nvmet_rdma_queue_mutex);
+		if (pending > NVMET_RDMA_BACKLOG)
+			return NVME_SC_CONNECT_CTRL_BUSY;
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1880,7 +1893,7 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
 		goto out_destroy_id;
 	}
 
-	ret = rdma_listen(cm_id, 128);
+	ret = rdma_listen(cm_id, NVMET_RDMA_BACKLOG);
 	if (ret) {
 		pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
 		goto out_destroy_id;