Commit a249d30

nvme-fabrics: add queue setup helpers
tcp and rdma transports have lots of duplicate code setting up the
different queue mappings. Add common helpers.

Cc: Chaitanya Kulkarni <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Keith Busch <[email protected]>
1 parent 4a4d9bc commit a249d30
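
For context, the call pattern the new helpers enable looks roughly like the sketch below. This is only an illustration: the "foo" transport, nvme_foo_ctrl and to_foo_ctrl() are hypothetical stand-ins for a fabrics transport that embeds a struct nvme_ctrl and keeps its own u32 io_queues[HCTX_MAX_TYPES] array; the real callers are the rdma and tcp changes further down.

	/* Illustrative only: nvme_foo_ctrl/to_foo_ctrl() are hypothetical stand-ins. */
	static int nvme_foo_alloc_io_queues(struct nvme_foo_ctrl *ctrl)
	{
		struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
		unsigned int nr_io_queues;
		int ret;

		/* upper bound derived from the connect options */
		nr_io_queues = nvmf_nr_io_queues(opts);
		ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
		if (ret)
			return ret;

		/* split whatever the controller granted into default/read/poll counts */
		nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues);
		return 0;
	}

	/* blk-mq .map_queues callback: build the queue maps from the saved counts */
	static void nvme_foo_map_queues(struct blk_mq_tag_set *set)
	{
		struct nvme_foo_ctrl *ctrl = to_foo_ctrl(set->driver_data);

		nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
	}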

4 files changed: +96 -162 lines changed
drivers/nvme/host/fabrics.c

Lines changed: 76 additions & 0 deletions
@@ -957,6 +957,82 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 	return ret;
 }
 
+void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
+			u32 io_queues[HCTX_MAX_TYPES])
+{
+	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
+		/*
+		 * separate read/write queues
+		 * hand out dedicated default queues only after we have
+		 * sufficient read queues.
+		 */
+		io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
+		nr_io_queues -= io_queues[HCTX_TYPE_READ];
+		io_queues[HCTX_TYPE_DEFAULT] =
+			min(opts->nr_write_queues, nr_io_queues);
+		nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
+	} else {
+		/*
+		 * shared read/write queues
+		 * either no write queues were requested, or we don't have
+		 * sufficient queue count to have dedicated default queues.
+		 */
+		io_queues[HCTX_TYPE_DEFAULT] =
+			min(opts->nr_io_queues, nr_io_queues);
+		nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
+	}
+
+	if (opts->nr_poll_queues && nr_io_queues) {
+		/* map dedicated poll queues only if we have queues left */
+		io_queues[HCTX_TYPE_POLL] =
+			min(opts->nr_poll_queues, nr_io_queues);
+	}
+}
+EXPORT_SYMBOL_GPL(nvmf_set_io_queues);
+
+void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
+		     u32 io_queues[HCTX_MAX_TYPES])
+{
+	struct nvmf_ctrl_options *opts = ctrl->opts;
+
+	if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) {
+		/* separate read/write queues */
+		set->map[HCTX_TYPE_DEFAULT].nr_queues =
+			io_queues[HCTX_TYPE_DEFAULT];
+		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+		set->map[HCTX_TYPE_READ].nr_queues =
+			io_queues[HCTX_TYPE_READ];
+		set->map[HCTX_TYPE_READ].queue_offset =
+			io_queues[HCTX_TYPE_DEFAULT];
+	} else {
+		/* shared read/write queues */
+		set->map[HCTX_TYPE_DEFAULT].nr_queues =
+			io_queues[HCTX_TYPE_DEFAULT];
+		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+		set->map[HCTX_TYPE_READ].nr_queues =
+			io_queues[HCTX_TYPE_DEFAULT];
+		set->map[HCTX_TYPE_READ].queue_offset = 0;
+	}
+
+	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
+	if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) {
+		/* map dedicated poll queues only if we have queues left */
+		set->map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL];
+		set->map[HCTX_TYPE_POLL].queue_offset =
+			io_queues[HCTX_TYPE_DEFAULT] +
+			io_queues[HCTX_TYPE_READ];
+		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
+	}
+
+	dev_info(ctrl->device,
+		"mapped %d/%d/%d default/read/poll queues.\n",
+		io_queues[HCTX_TYPE_DEFAULT],
+		io_queues[HCTX_TYPE_READ],
+		io_queues[HCTX_TYPE_POLL]);
+}
+EXPORT_SYMBOL_GPL(nvmf_map_queues);
+
 static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
 				    unsigned int required_opts)
 {
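
As a worked example (illustrative numbers only): with connect options nr_io_queues=8, nr_write_queues=2 and nr_poll_queues=2, if the controller grants 12 queues then nvmf_set_io_queues() takes the separate read/write branch: HCTX_TYPE_READ gets 8, HCTX_TYPE_DEFAULT gets min(2, 4) = 2, and HCTX_TYPE_POLL gets min(2, 2) = 2. If the controller grants only 8, the shared branch runs instead: HCTX_TYPE_DEFAULT gets all 8, nothing is left over, and no poll queues are mapped.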

drivers/nvme/host/fabrics.h

Lines changed: 11 additions & 0 deletions
@@ -203,6 +203,13 @@ static inline void nvmf_complete_timed_out_request(struct request *rq)
 	}
 }
 
+static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
+{
+	return min(opts->nr_io_queues, num_online_cpus()) +
+		min(opts->nr_write_queues, num_online_cpus()) +
+		min(opts->nr_poll_queues, num_online_cpus());
+}
+
 int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
@@ -215,5 +222,9 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
 		struct nvmf_ctrl_options *opts);
+void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
+			u32 io_queues[HCTX_MAX_TYPES]);
+void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
+		     u32 io_queues[HCTX_MAX_TYPES]);
 
 #endif /* _NVME_FABRICS_H */
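
For instance (illustrative numbers only): on a 4-CPU host with nr_io_queues=8, nr_write_queues=2 and nr_poll_queues=2, nvmf_nr_io_queues() evaluates to min(8, 4) + min(2, 4) + min(2, 4) = 8, so each queue class is capped at the number of online CPUs before the total is requested from the controller.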

drivers/nvme/host/rdma.c

Lines changed: 4 additions & 75 deletions
@@ -713,18 +713,10 @@ static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl,
 static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
-	struct ib_device *ibdev = ctrl->device->dev;
-	unsigned int nr_io_queues, nr_default_queues;
-	unsigned int nr_read_queues, nr_poll_queues;
+	unsigned int nr_io_queues;
 	int i, ret;
 
-	nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors,
-				min(opts->nr_io_queues, num_online_cpus()));
-	nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors,
-				min(opts->nr_write_queues, num_online_cpus()));
-	nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus());
-	nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues;
-
+	nr_io_queues = nvmf_nr_io_queues(opts);
 	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
 	if (ret)
 		return ret;
@@ -739,34 +731,7 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 	dev_info(ctrl->ctrl.device,
 		"creating %d I/O queues.\n", nr_io_queues);
 
-	if (opts->nr_write_queues && nr_read_queues < nr_io_queues) {
-		/*
-		 * separate read/write queues
-		 * hand out dedicated default queues only after we have
-		 * sufficient read queues.
-		 */
-		ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
-		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
-		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-			min(nr_default_queues, nr_io_queues);
-		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	} else {
-		/*
-		 * shared read/write queues
-		 * either no write queues were requested, or we don't have
-		 * sufficient queue count to have dedicated default queues.
-		 */
-		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-			min(nr_read_queues, nr_io_queues);
-		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	}
-
-	if (opts->nr_poll_queues && nr_io_queues) {
-		/* map dedicated poll queues only if we have queues left */
-		ctrl->io_queues[HCTX_TYPE_POLL] =
-			min(nr_poll_queues, nr_io_queues);
-	}
-
+	nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues);
 	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
 		ret = nvme_rdma_alloc_queue(ctrl, i,
 				ctrl->ctrl.sqsize + 1);
@@ -2138,44 +2103,8 @@ static void nvme_rdma_complete_rq(struct request *rq)
 static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 
-	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
-		/* separate read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-		set->map[HCTX_TYPE_READ].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_READ];
-		set->map[HCTX_TYPE_READ].queue_offset =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	} else {
-		/* shared read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-		set->map[HCTX_TYPE_READ].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-		set->map[HCTX_TYPE_READ].queue_offset = 0;
-	}
-	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
-	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
-
-	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
-		/* map dedicated poll queues only if we have queues left */
-		set->map[HCTX_TYPE_POLL].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_POLL];
-		set->map[HCTX_TYPE_POLL].queue_offset =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
-			ctrl->io_queues[HCTX_TYPE_READ];
-		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
-	}
-
-	dev_info(ctrl->ctrl.device,
-		"mapped %d/%d/%d default/read/poll queues.\n",
-		ctrl->io_queues[HCTX_TYPE_DEFAULT],
-		ctrl->io_queues[HCTX_TYPE_READ],
-		ctrl->io_queues[HCTX_TYPE_POLL]);
+	nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
 }
 
 static const struct blk_mq_ops nvme_rdma_mq_ops = {

drivers/nvme/host/tcp.c

Lines changed: 5 additions & 87 deletions
@@ -1802,58 +1802,12 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 	return ret;
 }
 
-static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
-{
-	unsigned int nr_io_queues;
-
-	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
-	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
-	nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
-
-	return nr_io_queues;
-}
-
-static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
-		unsigned int nr_io_queues)
-{
-	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
-	struct nvmf_ctrl_options *opts = nctrl->opts;
-
-	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
-		/*
-		 * separate read/write queues
-		 * hand out dedicated default queues only after we have
-		 * sufficient read queues.
-		 */
-		ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
-		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
-		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-			min(opts->nr_write_queues, nr_io_queues);
-		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	} else {
-		/*
-		 * shared read/write queues
-		 * either no write queues were requested, or we don't have
-		 * sufficient queue count to have dedicated default queues.
-		 */
-		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-			min(opts->nr_io_queues, nr_io_queues);
-		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	}
-
-	if (opts->nr_poll_queues && nr_io_queues) {
-		/* map dedicated poll queues only if we have queues left */
-		ctrl->io_queues[HCTX_TYPE_POLL] =
-			min(opts->nr_poll_queues, nr_io_queues);
-	}
-}
-
 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 {
 	unsigned int nr_io_queues;
 	int ret;
 
-	nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
+	nr_io_queues = nvmf_nr_io_queues(ctrl->opts);
 	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
 	if (ret)
 		return ret;
@@ -1868,8 +1822,8 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 	dev_info(ctrl->device,
 		"creating %d I/O queues.\n", nr_io_queues);
 
-	nvme_tcp_set_io_queues(ctrl, nr_io_queues);
-
+	nvmf_set_io_queues(ctrl->opts, nr_io_queues,
+			   to_tcp_ctrl(ctrl)->io_queues);
 	return __nvme_tcp_alloc_io_queues(ctrl);
 }
 
@@ -2449,44 +2403,8 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
 static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
-
-	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
-		/* separate read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-		set->map[HCTX_TYPE_READ].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_READ];
-		set->map[HCTX_TYPE_READ].queue_offset =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	} else {
-		/* shared read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-		set->map[HCTX_TYPE_READ].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT];
-		set->map[HCTX_TYPE_READ].queue_offset = 0;
-	}
-	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
-	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
-
-	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
-		/* map dedicated poll queues only if we have queues left */
-		set->map[HCTX_TYPE_POLL].nr_queues =
-			ctrl->io_queues[HCTX_TYPE_POLL];
-		set->map[HCTX_TYPE_POLL].queue_offset =
-			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
-			ctrl->io_queues[HCTX_TYPE_READ];
-		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
-	}
-
-	dev_info(ctrl->ctrl.device,
-		"mapped %d/%d/%d default/read/poll queues.\n",
-		ctrl->io_queues[HCTX_TYPE_DEFAULT],
-		ctrl->io_queues[HCTX_TYPE_READ],
-		ctrl->io_queues[HCTX_TYPE_POLL]);
+
+	nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
 }
 
 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
