@@ -1295,30 +1295,39 @@ static void fixup_discard_support(struct drbd_device *device, struct request_que
 	}
 }
 
+/* This is the workaround for "bio would need to, but cannot, be split" */
+static unsigned int drbd_backing_dev_max_segments(struct drbd_device *device)
+{
+	unsigned int max_segments;
+
+	rcu_read_lock();
+	max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
+	rcu_read_unlock();
+
+	if (!max_segments)
+		return BLK_MAX_SEGMENTS;
+	return max_segments;
+}
+
 static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
 				   unsigned int max_bio_size, struct o_qlim *o)
 {
 	struct request_queue * const q = device->rq_queue;
 	unsigned int max_hw_sectors = max_bio_size >> 9;
-	unsigned int max_segments = 0;
+	unsigned int max_segments = BLK_MAX_SEGMENTS;
 	struct request_queue *b = NULL;
-	struct disk_conf *dc;
 
 	if (bdev) {
 		b = bdev->backing_bdev->bd_disk->queue;
 
 		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
-		rcu_read_lock();
-		dc = rcu_dereference(device->ldev->disk_conf);
-		max_segments = dc->max_bio_bvecs;
-		rcu_read_unlock();
+		max_segments = drbd_backing_dev_max_segments(device);
 
 		blk_set_stacking_limits(&q->limits);
 	}
 
 	blk_queue_max_hw_sectors(q, max_hw_sectors);
-	/* This is the workaround for "bio would need to, but cannot, be split" */
-	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
+	blk_queue_max_segments(q, max_segments);
 	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
 	decide_on_discard_support(device, bdev);
 
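
The change above factors an RCU-protected read of a configuration value, with a fall-back to a default when it is unset, into a small helper. Below is a minimal userspace-style sketch of that pattern under simplified assumptions; it is not kernel or DRBD code. The names example_conf, example_max_segments and EXAMPLE_DEFAULT_SEGMENTS are hypothetical stand-ins (EXAMPLE_DEFAULT_SEGMENTS playing the role of BLK_MAX_SEGMENTS), and the RCU locking from the real helper is only noted in a comment.

/*
 * Sketch of "read a configured limit, fall back to a default when it is 0".
 * All identifiers here are illustrative stand-ins, not DRBD symbols.
 */
#include <stdio.h>

#define EXAMPLE_DEFAULT_SEGMENTS 128	/* stand-in for BLK_MAX_SEGMENTS */

struct example_conf {
	unsigned int max_bio_bvecs;	/* 0 means "not configured" */
};

static unsigned int example_max_segments(const struct example_conf *conf)
{
	/* In the kernel helper this read happens under rcu_read_lock(). */
	unsigned int max_segments = conf->max_bio_bvecs;

	if (!max_segments)
		return EXAMPLE_DEFAULT_SEGMENTS;
	return max_segments;
}

int main(void)
{
	struct example_conf unset = { .max_bio_bvecs = 0 };
	struct example_conf tuned = { .max_bio_bvecs = 32 };

	printf("unset -> %u\n", example_max_segments(&unset));	/* prints 128 */
	printf("tuned -> %u\n", example_max_segments(&tuned));	/* prints 32 */
	return 0;
}

Keeping the default inside the helper lets the caller initialize max_segments to the default and overwrite it only when a backing device is attached, which is why the ternary at the blk_queue_max_segments() call site becomes unnecessary.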