Skip to content

Commit f92141e

Browse files
Merge patch series "convert SCSI to atomic queue limits, part 1 (v3)"
Christoph Hellwig <[email protected]> says: Hi all, this series converts the SCSI midlayer and LLDDs to use atomic queue limits API. It is pretty straight forward, except for the mpt3mr driver which does really weird and probably already broken things by setting limits from unlocked device iteration callbacks. I will probably defer the (more complicated) ULD changes to the next merge window as they would heavily conflict with Damien's zone write plugging series. With that the series could go in through the SCSI tree if Jens' ACKs the core block layer bits. Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Martin K. Petersen <[email protected]>
2 parents 6d97e80 + ec84ca4 commit f92141e

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

61 files changed

+344
-592
lines changed

block/blk-settings.c

Lines changed: 0 additions & 245 deletions
Original file line numberDiff line numberDiff line change
@@ -284,72 +284,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
284284
}
285285
EXPORT_SYMBOL_GPL(queue_limits_set);
286286

287-
/**
288-
* blk_queue_bounce_limit - set bounce buffer limit for queue
289-
* @q: the request queue for the device
290-
* @bounce: bounce limit to enforce
291-
*
292-
* Description:
293-
* Force bouncing for ISA DMA ranges or highmem.
294-
*
295-
* DEPRECATED, don't use in new code.
296-
**/
297-
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
298-
{
299-
q->limits.bounce = bounce;
300-
}
301-
EXPORT_SYMBOL(blk_queue_bounce_limit);
302-
303-
/**
304-
* blk_queue_max_hw_sectors - set max sectors for a request for this queue
305-
* @q: the request queue for the device
306-
* @max_hw_sectors: max hardware sectors in the usual 512b unit
307-
*
308-
* Description:
309-
* Enables a low level driver to set a hard upper limit,
310-
* max_hw_sectors, on the size of requests. max_hw_sectors is set by
311-
* the device driver based upon the capabilities of the I/O
312-
* controller.
313-
*
314-
* max_dev_sectors is a hard limit imposed by the storage device for
315-
* READ/WRITE requests. It is set by the disk driver.
316-
*
317-
* max_sectors is a soft limit imposed by the block layer for
318-
* filesystem type requests. This value can be overridden on a
319-
* per-device basis in /sys/block/<device>/queue/max_sectors_kb.
320-
* The soft limit can not exceed max_hw_sectors.
321-
**/
322-
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
323-
{
324-
struct queue_limits *limits = &q->limits;
325-
unsigned int max_sectors;
326-
327-
if ((max_hw_sectors << 9) < PAGE_SIZE) {
328-
max_hw_sectors = 1 << (PAGE_SHIFT - 9);
329-
pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
330-
}
331-
332-
max_hw_sectors = round_down(max_hw_sectors,
333-
limits->logical_block_size >> SECTOR_SHIFT);
334-
limits->max_hw_sectors = max_hw_sectors;
335-
336-
max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
337-
338-
if (limits->max_user_sectors)
339-
max_sectors = min(max_sectors, limits->max_user_sectors);
340-
else
341-
max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);
342-
343-
max_sectors = round_down(max_sectors,
344-
limits->logical_block_size >> SECTOR_SHIFT);
345-
limits->max_sectors = max_sectors;
346-
347-
if (!q->disk)
348-
return;
349-
q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
350-
}
351-
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
352-
353287
/**
354288
* blk_queue_chunk_sectors - set size of the chunk for this queue
355289
* @q: the request queue for the device
@@ -436,65 +370,6 @@ void blk_queue_max_zone_append_sectors(struct request_queue *q,
436370
}
437371
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
438372

439-
/**
440-
* blk_queue_max_segments - set max hw segments for a request for this queue
441-
* @q: the request queue for the device
442-
* @max_segments: max number of segments
443-
*
444-
* Description:
445-
* Enables a low level driver to set an upper limit on the number of
446-
* hw data segments in a request.
447-
**/
448-
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
449-
{
450-
if (!max_segments) {
451-
max_segments = 1;
452-
pr_info("%s: set to minimum %u\n", __func__, max_segments);
453-
}
454-
455-
q->limits.max_segments = max_segments;
456-
}
457-
EXPORT_SYMBOL(blk_queue_max_segments);
458-
459-
/**
460-
* blk_queue_max_discard_segments - set max segments for discard requests
461-
* @q: the request queue for the device
462-
* @max_segments: max number of segments
463-
*
464-
* Description:
465-
* Enables a low level driver to set an upper limit on the number of
466-
* segments in a discard request.
467-
**/
468-
void blk_queue_max_discard_segments(struct request_queue *q,
469-
unsigned short max_segments)
470-
{
471-
q->limits.max_discard_segments = max_segments;
472-
}
473-
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
474-
475-
/**
476-
* blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
477-
* @q: the request queue for the device
478-
* @max_size: max size of segment in bytes
479-
*
480-
* Description:
481-
* Enables a low level driver to set an upper limit on the size of a
482-
* coalesced segment
483-
**/
484-
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
485-
{
486-
if (max_size < PAGE_SIZE) {
487-
max_size = PAGE_SIZE;
488-
pr_info("%s: set to minimum %u\n", __func__, max_size);
489-
}
490-
491-
/* see blk_queue_virt_boundary() for the explanation */
492-
WARN_ON_ONCE(q->limits.virt_boundary_mask);
493-
494-
q->limits.max_segment_size = max_size;
495-
}
496-
EXPORT_SYMBOL(blk_queue_max_segment_size);
497-
498373
/**
499374
* blk_queue_logical_block_size - set logical block size for the queue
500375
* @q: the request queue for the device
@@ -661,29 +536,6 @@ void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
661536
}
662537
EXPORT_SYMBOL(blk_limits_io_opt);
663538

664-
/**
665-
* blk_queue_io_opt - set optimal request size for the queue
666-
* @q: the request queue for the device
667-
* @opt: optimal request size in bytes
668-
*
669-
* Description:
670-
* Storage devices may report an optimal I/O size, which is the
671-
* device's preferred unit for sustained I/O. This is rarely reported
672-
* for disk drives. For RAID arrays it is usually the stripe width or
673-
* the internal track size. A properly aligned multiple of
674-
* optimal_io_size is the preferred request size for workloads where
675-
* sustained throughput is desired.
676-
*/
677-
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
678-
{
679-
blk_limits_io_opt(&q->limits, opt);
680-
if (!q->disk)
681-
return;
682-
q->disk->bdi->ra_pages =
683-
max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
684-
}
685-
EXPORT_SYMBOL(blk_queue_io_opt);
686-
687539
static int queue_limit_alignment_offset(const struct queue_limits *lim,
688540
sector_t sector)
689541
{
@@ -933,81 +785,6 @@ void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
933785
}
934786
EXPORT_SYMBOL(blk_queue_update_dma_pad);
935787

936-
/**
937-
* blk_queue_segment_boundary - set boundary rules for segment merging
938-
* @q: the request queue for the device
939-
* @mask: the memory boundary mask
940-
**/
941-
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
942-
{
943-
if (mask < PAGE_SIZE - 1) {
944-
mask = PAGE_SIZE - 1;
945-
pr_info("%s: set to minimum %lx\n", __func__, mask);
946-
}
947-
948-
q->limits.seg_boundary_mask = mask;
949-
}
950-
EXPORT_SYMBOL(blk_queue_segment_boundary);
951-
952-
/**
953-
* blk_queue_virt_boundary - set boundary rules for bio merging
954-
* @q: the request queue for the device
955-
* @mask: the memory boundary mask
956-
**/
957-
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
958-
{
959-
q->limits.virt_boundary_mask = mask;
960-
961-
/*
962-
* Devices that require a virtual boundary do not support scatter/gather
963-
* I/O natively, but instead require a descriptor list entry for each
964-
 * page (which might not be identical to the Linux PAGE_SIZE). Because
965-
* of that they are not limited by our notion of "segment size".
966-
*/
967-
if (mask)
968-
q->limits.max_segment_size = UINT_MAX;
969-
}
970-
EXPORT_SYMBOL(blk_queue_virt_boundary);
971-
972-
/**
973-
* blk_queue_dma_alignment - set dma length and memory alignment
974-
* @q: the request queue for the device
975-
* @mask: alignment mask
976-
*
977-
* description:
978-
* set required memory and length alignment for direct dma transactions.
979-
* this is used when building direct io requests for the queue.
980-
*
981-
**/
982-
void blk_queue_dma_alignment(struct request_queue *q, int mask)
983-
{
984-
q->limits.dma_alignment = mask;
985-
}
986-
EXPORT_SYMBOL(blk_queue_dma_alignment);
987-
988-
/**
989-
* blk_queue_update_dma_alignment - update dma length and memory alignment
990-
* @q: the request queue for the device
991-
* @mask: alignment mask
992-
*
993-
* description:
994-
* update required memory and length alignment for direct dma transactions.
995-
* If the requested alignment is larger than the current alignment, then
996-
* the current queue alignment is updated to the new value, otherwise it
997-
* is left alone. The design of this is to allow multiple objects
998-
* (driver, device, transport etc) to set their respective
999-
* alignments without having them interfere.
1000-
*
1001-
**/
1002-
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
1003-
{
1004-
BUG_ON(mask > PAGE_SIZE);
1005-
1006-
if (mask > q->limits.dma_alignment)
1007-
q->limits.dma_alignment = mask;
1008-
}
1009-
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
1010-
1011788
/**
1012789
* blk_set_queue_depth - tell the block layer about the device queue depth
1013790
* @q: the request queue for the device
@@ -1061,28 +838,6 @@ void blk_queue_required_elevator_features(struct request_queue *q,
1061838
}
1062839
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
1063840

1064-
/**
1065-
* blk_queue_can_use_dma_map_merging - configure queue for merging segments.
1066-
* @q: the request queue for the device
1067-
* @dev: the device pointer for dma
1068-
*
1069-
* Tell the block layer about merging the segments by dma map of @q.
1070-
*/
1071-
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
1072-
struct device *dev)
1073-
{
1074-
unsigned long boundary = dma_get_merge_boundary(dev);
1075-
1076-
if (!boundary)
1077-
return false;
1078-
1079-
/* No need to update max_segment_size. see blk_queue_virt_boundary() */
1080-
blk_queue_virt_boundary(q, boundary);
1081-
1082-
return true;
1083-
}
1084-
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
1085-
1086841
/**
1087842
 * disk_set_zoned - indicate a zoned device
1088843
* @disk: gendisk to configure

block/bsg-lib.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -354,12 +354,14 @@ static const struct blk_mq_ops bsg_mq_ops = {
354354
* bsg_setup_queue - Create and add the bsg hooks so we can receive requests
355355
* @dev: device to attach bsg device to
356356
* @name: device to give bsg device
357+
* @lim: queue limits for the bsg queue
357358
* @job_fn: bsg job handler
358359
* @timeout: timeout handler function pointer
359360
* @dd_job_size: size of LLD data needed for each job
360361
*/
361362
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
362-
bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
363+
struct queue_limits *lim, bsg_job_fn *job_fn,
364+
bsg_timeout_fn *timeout, int dd_job_size)
363365
{
364366
struct bsg_set *bset;
365367
struct blk_mq_tag_set *set;
@@ -383,7 +385,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
383385
if (blk_mq_alloc_tag_set(set))
384386
goto out_tag_set;
385387

386-
q = blk_mq_alloc_queue(set, NULL, NULL);
388+
q = blk_mq_alloc_queue(set, lim, NULL);
387389
if (IS_ERR(q)) {
388390
ret = PTR_ERR(q);
389391
goto out_queue;

drivers/ata/ahci.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -397,7 +397,7 @@ extern const struct attribute_group *ahci_sdev_groups[];
397397
.sdev_groups = ahci_sdev_groups, \
398398
.change_queue_depth = ata_scsi_change_queue_depth, \
399399
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
400-
.slave_configure = ata_scsi_slave_config
400+
.device_configure = ata_scsi_device_configure
401401

402402
extern struct ata_port_operations ahci_ops;
403403
extern struct ata_port_operations ahci_platform_ops;

drivers/ata/libata-sata.c

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1254,21 +1254,24 @@ void ata_sas_tport_delete(struct ata_port *ap)
12541254
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
12551255

12561256
/**
1257-
* ata_sas_slave_configure - Default slave_config routine for libata devices
1257+
* ata_sas_device_configure - Default device_configure routine for libata
1258+
* devices
12581259
* @sdev: SCSI device to configure
1260+
* @lim: queue limits
12591261
* @ap: ATA port to which SCSI device is attached
12601262
*
12611263
* RETURNS:
12621264
* Zero.
12631265
*/
12641266

1265-
int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
1267+
int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
1268+
struct ata_port *ap)
12661269
{
12671270
ata_scsi_sdev_config(sdev);
12681271

1269-
return ata_scsi_dev_config(sdev, ap->link.device);
1272+
return ata_scsi_dev_config(sdev, lim, ap->link.device);
12701273
}
1271-
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
1274+
EXPORT_SYMBOL_GPL(ata_sas_device_configure);
12721275

12731276
/**
12741277
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device

0 commit comments

Comments
 (0)