
Commit ba3f67c

Christoph Hellwig authored and axboe committed
xen-blkfront: atomically update queue limits
Pass the initial queue limits to blk_mq_alloc_disk and use the
blkif_set_queue_limits API to update the limits on reconnect.

Signed-off-by: Christoph Hellwig <[email protected]>
Acked-by: Roger Pau Monné <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent 4f81b87 commit ba3f67c
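For context, the "atomic update" pattern this commit moves to works roughly as follows. This is a minimal sketch against the block layer's queue_limits API, not code from the commit; the function name and limit values are made up for illustration:

    #include <linux/blkdev.h>

    /*
     * Illustrative helper (hypothetical): stage all limit changes in a
     * local struct queue_limits, then validate and apply them in one
     * step, instead of calling individual blk_queue_* setters on a
     * live queue one at a time.
     */
    static int example_update_limits(struct request_queue *q)
    {
            struct queue_limits lim;

            lim = queue_limits_start_update(q);     /* takes q->limits_lock */
            lim.logical_block_size = 4096;          /* example value only */
            lim.max_hw_sectors = 2048;              /* example value only */
            return queue_limits_commit_update(q, &lim); /* validate + apply */
    }

Because the changes are committed in one operation, in-flight I/O never observes a half-updated set of limits, which is the point of the conversion below.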

File tree

1 file changed: +23 −18 lines changed


drivers/block/xen-blkfront.c

Lines changed: 23 additions & 18 deletions
@@ -941,37 +941,35 @@ static const struct blk_mq_ops blkfront_mq_ops = {
         .complete = blkif_complete_rq,
 };
 
-static void blkif_set_queue_limits(struct blkfront_info *info)
+static void blkif_set_queue_limits(const struct blkfront_info *info,
+                struct queue_limits *lim)
 {
-        struct request_queue *rq = info->rq;
         unsigned int segments = info->max_indirect_segments ? :
                                 BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
-        blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
-
         if (info->feature_discard) {
-                blk_queue_max_discard_sectors(rq, UINT_MAX);
+                lim->max_hw_discard_sectors = UINT_MAX;
                 if (info->discard_granularity)
-                        rq->limits.discard_granularity = info->discard_granularity;
-                rq->limits.discard_alignment = info->discard_alignment;
+                        lim->discard_granularity = info->discard_granularity;
+                lim->discard_alignment = info->discard_alignment;
                 if (info->feature_secdiscard)
-                        blk_queue_max_secure_erase_sectors(rq, UINT_MAX);
+                        lim->max_secure_erase_sectors = UINT_MAX;
         }
 
         /* Hard sector size and max sectors impersonate the equiv. hardware. */
-        blk_queue_logical_block_size(rq, info->sector_size);
-        blk_queue_physical_block_size(rq, info->physical_sector_size);
-        blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+        lim->logical_block_size = info->sector_size;
+        lim->physical_block_size = info->physical_sector_size;
+        lim->max_hw_sectors = (segments * XEN_PAGE_SIZE) / 512;
 
         /* Each segment in a request is up to an aligned page in size. */
-        blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-        blk_queue_max_segment_size(rq, PAGE_SIZE);
+        lim->seg_boundary_mask = PAGE_SIZE - 1;
+        lim->max_segment_size = PAGE_SIZE;
 
         /* Ensure a merged request will fit in a single I/O ring slot. */
-        blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+        lim->max_segments = segments / GRANTS_PER_PSEG;
 
         /* Make sure buffer addresses are sector-aligned. */
-        blk_queue_dma_alignment(rq, 511);
+        lim->dma_alignment = 511;
 }
 
 static const char *flush_info(struct blkfront_info *info)
@@ -1068,6 +1066,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
                 struct blkfront_info *info, u16 sector_size,
                 unsigned int physical_sector_size)
 {
+        struct queue_limits lim = {};
         struct gendisk *gd;
         int nr_minors = 1;
         int err;
@@ -1134,11 +1133,13 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
         if (err)
                 goto out_release_minors;
 
-        gd = blk_mq_alloc_disk(&info->tag_set, NULL, info);
+        blkif_set_queue_limits(info, &lim);
+        gd = blk_mq_alloc_disk(&info->tag_set, &lim, info);
         if (IS_ERR(gd)) {
                 err = PTR_ERR(gd);
                 goto out_free_tag_set;
         }
+        blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue);
 
         strcpy(gd->disk_name, DEV_NAME);
         ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
@@ -1160,7 +1161,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
         info->gd = gd;
         info->sector_size = sector_size;
         info->physical_sector_size = physical_sector_size;
-        blkif_set_queue_limits(info);
 
         xlvbd_flush(info);
 
@@ -2004,14 +2004,19 @@ static int blkfront_probe(struct xenbus_device *dev,
 
 static int blkif_recover(struct blkfront_info *info)
 {
+        struct queue_limits lim;
         unsigned int r_index;
         struct request *req, *n;
         int rc;
         struct bio *bio;
         struct blkfront_ring_info *rinfo;
 
+        lim = queue_limits_start_update(info->rq);
         blkfront_gather_backend_features(info);
-        blkif_set_queue_limits(info);
+        blkif_set_queue_limits(info, &lim);
+        rc = queue_limits_commit_update(info->rq, &lim);
+        if (rc)
+                return rc;
 
         for_each_rinfo(info, rinfo, r_index) {
                 rc = blkfront_setup_indirect(rinfo);
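
The allocation-side half of the change follows the same idea: instead of fixing up limits after the disk exists, the initial limits are handed to blk_mq_alloc_disk(), so the queue never runs with default limits. A minimal sketch of that pattern; the function name and limit values here are placeholders, not code from this commit:

    #include <linux/blk-mq.h>

    /*
     * Illustrative only: limits are filled into a stack-local
     * queue_limits and passed at allocation time, so they are applied
     * before the disk is ever visible to I/O.
     */
    static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set,
                                              void *driver_data)
    {
            struct queue_limits lim = { };

            lim.logical_block_size = 512;   /* example value only */
            lim.max_hw_sectors = 1024;      /* example value only */
            return blk_mq_alloc_disk(set, &lim, driver_data);
    }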
