 
 #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
 #define NVMET_TCP_MAXH2CDATA 0x400000 /* 16M arbitrary limit */
+#define NVMET_TCP_BACKLOG 128
 
 static int param_store_val(const char *str, int *val, int min, int max)
 {
@@ -2067,7 +2068,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
 		goto err_sock;
 	}
 
-	ret = kernel_listen(port->sock, 128);
+	ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
 	if (ret) {
 		pr_err("failed to listen %d on port sock\n", ret);
 		goto err_sock;
@@ -2133,8 +2134,19 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
 		container_of(sq, struct nvmet_tcp_queue, nvme_sq);
 
 	if (sq->qid == 0) {
-		/* Let inflight controller teardown complete */
-		flush_workqueue(nvmet_wq);
+		struct nvmet_tcp_queue *q;
+		int pending = 0;
+
+		/* Check for pending controller teardown */
+		mutex_lock(&nvmet_tcp_queue_mutex);
+		list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
+			if (q->nvme_sq.ctrl == sq->ctrl &&
+			    q->state == NVMET_TCP_Q_DISCONNECTING)
+				pending++;
+		}
+		mutex_unlock(&nvmet_tcp_queue_mutex);
+		if (pending > NVMET_TCP_BACKLOG)
+			return NVME_SC_CONNECT_CTRL_BUSY;
 	}
 
 	queue->nr_cmds = sq->size * 2;
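What the final hunk changes, in plain terms: on an admin-queue (qid 0) connect, the target no longer blocks in flush_workqueue(nvmet_wq) waiting for every inflight controller teardown; it walks nvmet_tcp_queue_list under nvmet_tcp_queue_mutex, counts this controller's queues still in NVMET_TCP_Q_DISCONNECTING, and fails the connect with NVME_SC_CONNECT_CTRL_BUSY once more than NVMET_TCP_BACKLOG of them are outstanding (the same 128 the listen backlog now uses), so the host can retry instead of the target stalling the connect path. The snippet below is a minimal userspace sketch of that bounded check, with made-up names (tcp_queue, Q_DISCONNECTING, queue_busy); it is not the kernel code itself.

#include <stdio.h>

#define BACKLOG 128	/* stands in for NVMET_TCP_BACKLOG */

enum q_state { Q_LIVE, Q_DISCONNECTING };

struct tcp_queue {
	int ctrl_id;			/* owning controller */
	enum q_state state;
	struct tcp_queue *next;		/* stand-in for the kernel's global queue list */
};

/* Return 1 ("busy, ask the host to retry") when more than BACKLOG queues of
 * this controller are still tearing down, 0 when the connect may proceed. */
static int queue_busy(const struct tcp_queue *head, int ctrl_id)
{
	const struct tcp_queue *q;
	int pending = 0;

	for (q = head; q; q = q->next)
		if (q->ctrl_id == ctrl_id && q->state == Q_DISCONNECTING)
			pending++;

	return pending > BACKLOG;
}

int main(void)
{
	struct tcp_queue stale = { 1, Q_DISCONNECTING, NULL };
	struct tcp_queue live  = { 1, Q_LIVE, &stale };

	/* One pending teardown is well under the backlog, so not busy. */
	printf("busy = %d\n", queue_busy(&live, 1));
	return 0;
}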