@@ -941,37 +941,35 @@ static const struct blk_mq_ops blkfront_mq_ops = {
 	.complete = blkif_complete_rq,
 };
 
-static void blkif_set_queue_limits(struct blkfront_info *info)
+static void blkif_set_queue_limits(const struct blkfront_info *info,
+		struct queue_limits *lim)
 {
-	struct request_queue *rq = info->rq;
 	unsigned int segments = info->max_indirect_segments ? :
 				BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
-	blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
-
 	if (info->feature_discard) {
-		blk_queue_max_discard_sectors(rq, UINT_MAX);
+		lim->max_hw_discard_sectors = UINT_MAX;
 		if (info->discard_granularity)
-			rq->limits.discard_granularity = info->discard_granularity;
-		rq->limits.discard_alignment = info->discard_alignment;
+			lim->discard_granularity = info->discard_granularity;
+		lim->discard_alignment = info->discard_alignment;
 		if (info->feature_secdiscard)
-			blk_queue_max_secure_erase_sectors(rq, UINT_MAX);
+			lim->max_secure_erase_sectors = UINT_MAX;
 	}
 
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-	blk_queue_logical_block_size(rq, info->sector_size);
-	blk_queue_physical_block_size(rq, info->physical_sector_size);
-	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+	lim->logical_block_size = info->sector_size;
+	lim->physical_block_size = info->physical_sector_size;
+	lim->max_hw_sectors = (segments * XEN_PAGE_SIZE) / 512;
 
 	/* Each segment in a request is up to an aligned page in size. */
-	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-	blk_queue_max_segment_size(rq, PAGE_SIZE);
+	lim->seg_boundary_mask = PAGE_SIZE - 1;
+	lim->max_segment_size = PAGE_SIZE;
 
 	/* Ensure a merged request will fit in a single I/O ring slot. */
-	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+	lim->max_segments = segments / GRANTS_PER_PSEG;
 
 	/* Make sure buffer addresses are sector-aligned. */
-	blk_queue_dma_alignment(rq, 511);
+	lim->dma_alignment = 511;
 }
 
 static const char *flush_info(struct blkfront_info *info)
@@ -1068,6 +1066,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 		struct blkfront_info *info, u16 sector_size,
 		unsigned int physical_sector_size)
 {
+	struct queue_limits lim = {};
 	struct gendisk *gd;
 	int nr_minors = 1;
 	int err;
@@ -1134,11 +1133,13 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	if (err)
 		goto out_release_minors;
 
-	gd = blk_mq_alloc_disk(&info->tag_set, NULL, info);
+	blkif_set_queue_limits(info, &lim);
+	gd = blk_mq_alloc_disk(&info->tag_set, &lim, info);
 	if (IS_ERR(gd)) {
 		err = PTR_ERR(gd);
 		goto out_free_tag_set;
 	}
+	blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue);
 
 	strcpy(gd->disk_name, DEV_NAME);
 	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
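
Note that QUEUE_FLAG_VIRT is a request_queue flag rather than a queue_limits field, which is why the hunk above moves it out of blkif_set_queue_limits(): flags still have to be toggled on the queue after allocation. A sketch of the resulting sequence (same hypothetical error handling as above; the diff itself uses gotos):

	/* Limits go in at allocation; flags are set on the result. */
	blkif_set_queue_limits(info, &lim);
	gd = blk_mq_alloc_disk(&info->tag_set, &lim, info);
	if (IS_ERR(gd))
		return PTR_ERR(gd);
	blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue);
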
@@ -1160,7 +1161,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	info->gd = gd;
 	info->sector_size = sector_size;
 	info->physical_sector_size = physical_sector_size;
-	blkif_set_queue_limits(info);
 
 	xlvbd_flush(info);
 
@@ -2004,14 +2004,19 @@ static int blkfront_probe(struct xenbus_device *dev,
 
 static int blkif_recover(struct blkfront_info *info)
 {
+	struct queue_limits lim;
 	unsigned int r_index;
 	struct request *req, *n;
 	int rc;
 	struct bio *bio;
 	struct blkfront_ring_info *rinfo;
 
+	lim = queue_limits_start_update(info->rq);
 	blkfront_gather_backend_features(info);
-	blkif_set_queue_limits(info);
+	blkif_set_queue_limits(info, &lim);
+	rc = queue_limits_commit_update(info->rq, &lim);
+	if (rc)
+		return rc;
 
 	for_each_rinfo(info, rinfo, r_index) {
 		rc = blkfront_setup_indirect(rinfo);
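
blkif_recover() is the one path where limits change on an already-live queue, so it uses the start/commit pair instead of passing limits at allocation. A minimal sketch of that pattern, not blkfront code — q and new_max_sectors are hypothetical:

	/*
	 * queue_limits_start_update() takes the queue's limits lock and
	 * returns a copy of the current limits; the driver edits the
	 * copy, and queue_limits_commit_update() validates and publishes
	 * it, dropping the lock and returning a negative errno if the
	 * combination is invalid.
	 */
	struct queue_limits lim;
	int err;

	lim = queue_limits_start_update(q);
	lim.max_hw_sectors = new_max_sectors;
	err = queue_limits_commit_update(q, &lim);
	if (err)
		return err;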