@@ -213,6 +213,7 @@ struct blkfront_info
213
213
struct blk_mq_tag_set tag_set ;
214
214
struct blkfront_ring_info * rinfo ;
215
215
unsigned int nr_rings ;
216
+ unsigned int rinfo_size ;
216
217
/* Save uncomplete reqs and bios for migration. */
217
218
struct list_head requests ;
218
219
struct bio_list bio_list ;
@@ -259,6 +260,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
259
260
static void blkfront_gather_backend_features (struct blkfront_info * info );
260
261
static int negotiate_mq (struct blkfront_info * info );
261
262
263
/*
 * Iterate over every blkfront_ring_info of @info.
 *
 * The per-ring structures live in one contiguous allocation of
 * nr_rings entries, each info->rinfo_size bytes apart.  rinfo_size
 * includes the flexible shadow[] array, so plain pointer/array
 * indexing on info->rinfo would use the wrong stride; @ptr must be
 * advanced by byte offset instead.  @idx counts 0 .. nr_rings - 1.
 *
 * NOTE: there must be no space between the macro name and '(' —
 * otherwise this becomes an object-like macro and fails to compile.
 */
#define for_each_rinfo(info, ptr, idx)				\
	for ((ptr) = (info)->rinfo, (idx) = 0;			\
	     (idx) < (info)->nr_rings;				\
	     (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
267
+
268
+ static inline struct blkfront_ring_info *
269
+ get_rinfo (const struct blkfront_info * info , unsigned int i )
270
+ {
271
+ BUG_ON (i >= info -> nr_rings );
272
+ return (void * )info -> rinfo + i * info -> rinfo_size ;
273
+ }
274
+
262
275
static int get_id_from_freelist (struct blkfront_ring_info * rinfo )
263
276
{
264
277
unsigned long free = rinfo -> shadow_free ;
@@ -883,8 +896,7 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
883
896
struct blkfront_info * info = hctx -> queue -> queuedata ;
884
897
struct blkfront_ring_info * rinfo = NULL ;
885
898
886
- BUG_ON (info -> nr_rings <= qid );
887
- rinfo = & info -> rinfo [qid ];
899
+ rinfo = get_rinfo (info , qid );
888
900
blk_mq_start_request (qd -> rq );
889
901
spin_lock_irqsave (& rinfo -> ring_lock , flags );
890
902
if (RING_FULL (& rinfo -> ring ))
@@ -1181,16 +1193,15 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
1181
1193
static void xlvbd_release_gendisk (struct blkfront_info * info )
1182
1194
{
1183
1195
unsigned int minor , nr_minors , i ;
1196
+ struct blkfront_ring_info * rinfo ;
1184
1197
1185
1198
if (info -> rq == NULL )
1186
1199
return ;
1187
1200
1188
1201
/* No more blkif_request(). */
1189
1202
blk_mq_stop_hw_queues (info -> rq );
1190
1203
1191
- for (i = 0 ; i < info -> nr_rings ; i ++ ) {
1192
- struct blkfront_ring_info * rinfo = & info -> rinfo [i ];
1193
-
1204
+ for_each_rinfo (info , rinfo , i ) {
1194
1205
/* No more gnttab callback work. */
1195
1206
gnttab_cancel_free_callback (& rinfo -> callback );
1196
1207
@@ -1339,6 +1350,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
1339
1350
static void blkif_free (struct blkfront_info * info , int suspend )
1340
1351
{
1341
1352
unsigned int i ;
1353
+ struct blkfront_ring_info * rinfo ;
1342
1354
1343
1355
/* Prevent new requests being issued until we fix things up. */
1344
1356
info -> connected = suspend ?
@@ -1347,8 +1359,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
1347
1359
if (info -> rq )
1348
1360
blk_mq_stop_hw_queues (info -> rq );
1349
1361
1350
- for ( i = 0 ; i < info -> nr_rings ; i ++ )
1351
- blkif_free_ring (& info -> rinfo [ i ] );
1362
+ for_each_rinfo ( info , rinfo , i )
1363
+ blkif_free_ring (rinfo );
1352
1364
1353
1365
kvfree (info -> rinfo );
1354
1366
info -> rinfo = NULL ;
@@ -1775,6 +1787,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
1775
1787
int err ;
1776
1788
unsigned int i , max_page_order ;
1777
1789
unsigned int ring_page_order ;
1790
+ struct blkfront_ring_info * rinfo ;
1778
1791
1779
1792
if (!info )
1780
1793
return - ENODEV ;
@@ -1788,9 +1801,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
1788
1801
if (err )
1789
1802
goto destroy_blkring ;
1790
1803
1791
- for (i = 0 ; i < info -> nr_rings ; i ++ ) {
1792
- struct blkfront_ring_info * rinfo = & info -> rinfo [i ];
1793
-
1804
+ for_each_rinfo (info , rinfo , i ) {
1794
1805
/* Create shared ring, alloc event channel. */
1795
1806
err = setup_blkring (dev , rinfo );
1796
1807
if (err )
@@ -1815,7 +1826,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
1815
1826
1816
1827
/* We already got the number of queues/rings in _probe */
1817
1828
if (info -> nr_rings == 1 ) {
1818
- err = write_per_ring_nodes (xbt , & info -> rinfo [ 0 ] , dev -> nodename );
1829
+ err = write_per_ring_nodes (xbt , info -> rinfo , dev -> nodename );
1819
1830
if (err )
1820
1831
goto destroy_blkring ;
1821
1832
} else {
@@ -1837,10 +1848,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
1837
1848
goto abort_transaction ;
1838
1849
}
1839
1850
1840
- for ( i = 0 ; i < info -> nr_rings ; i ++ ) {
1851
+ for_each_rinfo ( info , rinfo , i ) {
1841
1852
memset (path , 0 , pathsize );
1842
1853
snprintf (path , pathsize , "%s/queue-%u" , dev -> nodename , i );
1843
- err = write_per_ring_nodes (xbt , & info -> rinfo [ i ] , path );
1854
+ err = write_per_ring_nodes (xbt , rinfo , path );
1844
1855
if (err ) {
1845
1856
kfree (path );
1846
1857
goto destroy_blkring ;
@@ -1868,9 +1879,8 @@ static int talk_to_blkback(struct xenbus_device *dev,
1868
1879
goto destroy_blkring ;
1869
1880
}
1870
1881
1871
- for ( i = 0 ; i < info -> nr_rings ; i ++ ) {
1882
+ for_each_rinfo ( info , rinfo , i ) {
1872
1883
unsigned int j ;
1873
- struct blkfront_ring_info * rinfo = & info -> rinfo [i ];
1874
1884
1875
1885
for (j = 0 ; j < BLK_RING_SIZE (info ); j ++ )
1876
1886
rinfo -> shadow [j ].req .u .rw .id = j + 1 ;
@@ -1900,6 +1910,7 @@ static int negotiate_mq(struct blkfront_info *info)
1900
1910
{
1901
1911
unsigned int backend_max_queues ;
1902
1912
unsigned int i ;
1913
+ struct blkfront_ring_info * rinfo ;
1903
1914
1904
1915
BUG_ON (info -> nr_rings );
1905
1916
@@ -1911,20 +1922,16 @@ static int negotiate_mq(struct blkfront_info *info)
1911
1922
if (!info -> nr_rings )
1912
1923
info -> nr_rings = 1 ;
1913
1924
1914
- info -> rinfo = kvcalloc (info -> nr_rings ,
1915
- struct_size (info -> rinfo , shadow ,
1916
- BLK_RING_SIZE (info )),
1917
- GFP_KERNEL );
1925
+ info -> rinfo_size = struct_size (info -> rinfo , shadow ,
1926
+ BLK_RING_SIZE (info ));
1927
+ info -> rinfo = kvcalloc (info -> nr_rings , info -> rinfo_size , GFP_KERNEL );
1918
1928
if (!info -> rinfo ) {
1919
1929
xenbus_dev_fatal (info -> xbdev , - ENOMEM , "allocating ring_info structure" );
1920
1930
info -> nr_rings = 0 ;
1921
1931
return - ENOMEM ;
1922
1932
}
1923
1933
1924
- for (i = 0 ; i < info -> nr_rings ; i ++ ) {
1925
- struct blkfront_ring_info * rinfo ;
1926
-
1927
- rinfo = & info -> rinfo [i ];
1934
+ for_each_rinfo (info , rinfo , i ) {
1928
1935
INIT_LIST_HEAD (& rinfo -> indirect_pages );
1929
1936
INIT_LIST_HEAD (& rinfo -> grants );
1930
1937
rinfo -> dev_info = info ;
@@ -2017,16 +2024,15 @@ static int blkif_recover(struct blkfront_info *info)
2017
2024
int rc ;
2018
2025
struct bio * bio ;
2019
2026
unsigned int segs ;
2027
+ struct blkfront_ring_info * rinfo ;
2020
2028
2021
2029
blkfront_gather_backend_features (info );
2022
2030
/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
2023
2031
blkif_set_queue_limits (info );
2024
2032
segs = info -> max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST ;
2025
2033
blk_queue_max_segments (info -> rq , segs / GRANTS_PER_PSEG );
2026
2034
2027
- for (r_index = 0 ; r_index < info -> nr_rings ; r_index ++ ) {
2028
- struct blkfront_ring_info * rinfo = & info -> rinfo [r_index ];
2029
-
2035
+ for_each_rinfo (info , rinfo , r_index ) {
2030
2036
rc = blkfront_setup_indirect (rinfo );
2031
2037
if (rc )
2032
2038
return rc ;
@@ -2036,10 +2042,7 @@ static int blkif_recover(struct blkfront_info *info)
2036
2042
/* Now safe for us to use the shared ring */
2037
2043
info -> connected = BLKIF_STATE_CONNECTED ;
2038
2044
2039
- for (r_index = 0 ; r_index < info -> nr_rings ; r_index ++ ) {
2040
- struct blkfront_ring_info * rinfo ;
2041
-
2042
- rinfo = & info -> rinfo [r_index ];
2045
+ for_each_rinfo (info , rinfo , r_index ) {
2043
2046
/* Kick any other new requests queued since we resumed */
2044
2047
kick_pending_request_queues (rinfo );
2045
2048
}
@@ -2072,13 +2075,13 @@ static int blkfront_resume(struct xenbus_device *dev)
2072
2075
struct blkfront_info * info = dev_get_drvdata (& dev -> dev );
2073
2076
int err = 0 ;
2074
2077
unsigned int i , j ;
2078
+ struct blkfront_ring_info * rinfo ;
2075
2079
2076
2080
dev_dbg (& dev -> dev , "blkfront_resume: %s\n" , dev -> nodename );
2077
2081
2078
2082
bio_list_init (& info -> bio_list );
2079
2083
INIT_LIST_HEAD (& info -> requests );
2080
- for (i = 0 ; i < info -> nr_rings ; i ++ ) {
2081
- struct blkfront_ring_info * rinfo = & info -> rinfo [i ];
2084
+ for_each_rinfo (info , rinfo , i ) {
2082
2085
struct bio_list merge_bio ;
2083
2086
struct blk_shadow * shadow = rinfo -> shadow ;
2084
2087
@@ -2337,6 +2340,7 @@ static void blkfront_connect(struct blkfront_info *info)
2337
2340
unsigned int binfo ;
2338
2341
char * envp [] = { "RESIZE=1" , NULL };
2339
2342
int err , i ;
2343
+ struct blkfront_ring_info * rinfo ;
2340
2344
2341
2345
switch (info -> connected ) {
2342
2346
case BLKIF_STATE_CONNECTED :
@@ -2394,8 +2398,8 @@ static void blkfront_connect(struct blkfront_info *info)
2394
2398
"physical-sector-size" ,
2395
2399
sector_size );
2396
2400
blkfront_gather_backend_features (info );
2397
- for ( i = 0 ; i < info -> nr_rings ; i ++ ) {
2398
- err = blkfront_setup_indirect (& info -> rinfo [ i ] );
2401
+ for_each_rinfo ( info , rinfo , i ) {
2402
+ err = blkfront_setup_indirect (rinfo );
2399
2403
if (err ) {
2400
2404
xenbus_dev_fatal (info -> xbdev , err , "setup_indirect at %s" ,
2401
2405
info -> xbdev -> otherend );
@@ -2416,8 +2420,8 @@ static void blkfront_connect(struct blkfront_info *info)
2416
2420
2417
2421
/* Kick pending requests. */
2418
2422
info -> connected = BLKIF_STATE_CONNECTED ;
2419
- for ( i = 0 ; i < info -> nr_rings ; i ++ )
2420
- kick_pending_request_queues (& info -> rinfo [ i ] );
2423
+ for_each_rinfo ( info , rinfo , i )
2424
+ kick_pending_request_queues (rinfo );
2421
2425
2422
2426
device_add_disk (& info -> xbdev -> dev , info -> gd , NULL );
2423
2427
@@ -2652,9 +2656,9 @@ static void purge_persistent_grants(struct blkfront_info *info)
2652
2656
{
2653
2657
unsigned int i ;
2654
2658
unsigned long flags ;
2659
+ struct blkfront_ring_info * rinfo ;
2655
2660
2656
- for (i = 0 ; i < info -> nr_rings ; i ++ ) {
2657
- struct blkfront_ring_info * rinfo = & info -> rinfo [i ];
2661
+ for_each_rinfo (info , rinfo , i ) {
2658
2662
struct grant * gnt_list_entry , * tmp ;
2659
2663
2660
2664
spin_lock_irqsave (& rinfo -> ring_lock , flags );
0 commit comments