
Commit e6dfe74

Christoph Hellwig authored and axboe committed
drbd: atomically update queue limits in drbd_reconsider_queue_parameters
Switch drbd_reconsider_queue_parameters to set up the queue parameters in an on-stack queue_limits structure and apply them atomically. Remove various helpers that have become so trivial that they can be folded into drbd_reconsider_queue_parameters.

Signed-off-by: Christoph Hellwig <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
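For reference, the atomic update pattern this commit adopts works in three steps: queue_limits_start_update() returns a snapshot of the queue's current limits (and locks out concurrent updaters), the driver edits the on-stack copy, and queue_limits_commit_update() validates and publishes all the changes at once, returning an error if the combination is invalid. A minimal sketch of the pattern; example_update_limits() and the specific values are hypothetical, not part of this commit:

	#include <linux/blkdev.h>

	static int example_update_limits(struct request_queue *q)
	{
		struct queue_limits lim;

		/* Snapshot the current limits; concurrent updaters are
		 * locked out until the commit below. */
		lim = queue_limits_start_update(q);

		/* Edit the on-stack copy; readers still see the old,
		 * self-consistent limits. */
		lim.max_hw_sectors = 256;	/* illustrative value */
		lim.max_segments = BLK_MAX_SEGMENTS;

		/* Validate and publish every change in one step;
		 * returns 0 on success or a negative errno. */
		return queue_limits_commit_update(q, &lim);
	}

Until the commit happens, readers of the queue limits never observe a half-updated state, which is the point of replacing the individual blk_queue_*() setter calls.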
1 parent 5eaee6e commit e6dfe74

File tree

1 file changed (+46, -73)

drivers/block/drbd/drbd_nl.c

Lines changed: 46 additions & 73 deletions
@@ -1216,11 +1216,6 @@ static unsigned int drbd_max_peer_bio_size(struct drbd_device *device)
 	return DRBD_MAX_BIO_SIZE;
 }
 
-static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
-{
-	q->limits.discard_granularity = granularity;
-}
-
 static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
 {
 	/* when we introduced REQ_WRITE_SAME support, we also bumped
@@ -1247,62 +1242,6 @@ static bool drbd_discard_supported(struct drbd_connection *connection,
 	return true;
 }
 
-static void decide_on_discard_support(struct drbd_device *device,
-		struct drbd_backing_dev *bdev)
-{
-	struct drbd_connection *connection =
-		first_peer_device(device)->connection;
-	struct request_queue *q = device->rq_queue;
-	unsigned int max_discard_sectors;
-
-	if (!drbd_discard_supported(connection, bdev))
-		goto not_supported;
-
-	/*
-	 * We don't care for the granularity, really.
-	 *
-	 * Stacking limits below should fix it for the local device. Whether or
-	 * not it is a suitable granularity on the remote device is not our
-	 * problem, really. If you care, you need to use devices with similar
-	 * topology on all peers.
-	 */
-	blk_queue_discard_granularity(q, 512);
-	max_discard_sectors = drbd_max_discard_sectors(connection);
-	blk_queue_max_discard_sectors(q, max_discard_sectors);
-	return;
-
-not_supported:
-	blk_queue_discard_granularity(q, 0);
-	blk_queue_max_discard_sectors(q, 0);
-}
-
-static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
-{
-	/* Fixup max_write_zeroes_sectors after blk_stack_limits():
-	 * if we can handle "zeroes" efficiently on the protocol,
-	 * we want to do that, even if our backend does not announce
-	 * max_write_zeroes_sectors itself. */
-	struct drbd_connection *connection = first_peer_device(device)->connection;
-	/* If the peer announces WZEROES support, use it. Otherwise, rather
-	 * send explicit zeroes than rely on some discard-zeroes-data magic. */
-	if (connection->agreed_features & DRBD_FF_WZEROES)
-		q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
-	else
-		q->limits.max_write_zeroes_sectors = 0;
-}
-
-static void fixup_discard_support(struct drbd_device *device, struct request_queue *q)
-{
-	unsigned int max_discard = device->rq_queue->limits.max_discard_sectors;
-	unsigned int discard_granularity =
-		device->rq_queue->limits.discard_granularity >> SECTOR_SHIFT;
-
-	if (discard_granularity > max_discard) {
-		blk_queue_discard_granularity(q, 0);
-		blk_queue_max_discard_sectors(q, 0);
-	}
-}
-
 /* This is the workaround for "bio would need to, but cannot, be split" */
 static unsigned int drbd_backing_dev_max_segments(struct drbd_device *device)
 {
@@ -1320,8 +1259,11 @@ static unsigned int drbd_backing_dev_max_segments(struct drbd_device *device)
 void drbd_reconsider_queue_parameters(struct drbd_device *device,
 		struct drbd_backing_dev *bdev, struct o_qlim *o)
 {
+	struct drbd_connection *connection =
+		first_peer_device(device)->connection;
 	struct request_queue * const q = device->rq_queue;
 	unsigned int now = queue_max_hw_sectors(q) << 9;
+	struct queue_limits lim;
 	struct request_queue *b = NULL;
 	unsigned int new;

@@ -1348,24 +1290,55 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
 		drbd_info(device, "max BIO size = %u\n", new);
 	}
 
+	lim = queue_limits_start_update(q);
 	if (bdev) {
-		blk_set_stacking_limits(&q->limits);
-		blk_queue_max_segments(q,
-			drbd_backing_dev_max_segments(device));
+		blk_set_stacking_limits(&lim);
+		lim.max_segments = drbd_backing_dev_max_segments(device);
 	} else {
-		blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
+		lim.max_segments = BLK_MAX_SEGMENTS;
 	}
 
-	blk_queue_max_hw_sectors(q, new >> SECTOR_SHIFT);
-	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-	decide_on_discard_support(device, bdev);
+	lim.max_hw_sectors = new >> SECTOR_SHIFT;
+	lim.seg_boundary_mask = PAGE_SIZE - 1;
 
-	if (bdev) {
-		blk_stack_limits(&q->limits, &b->limits, 0);
-		disk_update_readahead(device->vdisk);
+	/*
+	 * We don't care for the granularity, really.
+	 *
+	 * Stacking limits below should fix it for the local device. Whether or
+	 * not it is a suitable granularity on the remote device is not our
+	 * problem, really. If you care, you need to use devices with similar
+	 * topology on all peers.
+	 */
+	if (drbd_discard_supported(connection, bdev)) {
+		lim.discard_granularity = 512;
+		lim.max_hw_discard_sectors =
+			drbd_max_discard_sectors(connection);
+	} else {
+		lim.discard_granularity = 0;
+		lim.max_hw_discard_sectors = 0;
 	}
-	fixup_write_zeroes(device, q);
-	fixup_discard_support(device, q);
+
+	if (bdev)
+		blk_stack_limits(&lim, &b->limits, 0);
+
+	/*
+	 * If we can handle "zeroes" efficiently on the protocol, we want to do
+	 * that, even if our backend does not announce max_write_zeroes_sectors
+	 * itself.
+	 */
+	if (connection->agreed_features & DRBD_FF_WZEROES)
+		lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
+	else
+		lim.max_write_zeroes_sectors = 0;
+
+	if ((lim.discard_granularity >> SECTOR_SHIFT) >
+	    lim.max_hw_discard_sectors) {
+		lim.discard_granularity = 0;
+		lim.max_hw_discard_sectors = 0;
+	}
+
+	if (queue_limits_commit_update(q, &lim))
+		drbd_err(device, "setting new queue limits failed\n");
 }
 
 /* Starts the worker thread */
