
Commit ec84ca4

Christoph Hellwig authored and martinkpetersen committed
scsi: block: Remove now unused queue limits helpers
Signed-off-by: Christoph Hellwig <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Reviewed-by: Bart Van Assche <[email protected]>
Reviewed-by: John Garry <[email protected]>
Reviewed-by: Damien Le Moal <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
1 parent 84a44a8 commit ec84ca4

File tree: 4 files changed (+5, -263 lines)


block/blk-settings.c

Lines changed: 0 additions & 245 deletions
@@ -284,72 +284,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
 }
 EXPORT_SYMBOL_GPL(queue_limits_set);
 
-/**
- * blk_queue_bounce_limit - set bounce buffer limit for queue
- * @q: the request queue for the device
- * @bounce: bounce limit to enforce
- *
- * Description:
- *    Force bouncing for ISA DMA ranges or highmem.
- *
- *    DEPRECATED, don't use in new code.
- **/
-void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
-{
-	q->limits.bounce = bounce;
-}
-EXPORT_SYMBOL(blk_queue_bounce_limit);
-
-/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q:  the request queue for the device
- * @max_hw_sectors:  max hardware sectors in the usual 512b unit
- *
- * Description:
- *    Enables a low level driver to set a hard upper limit,
- *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
- *    the device driver based upon the capabilities of the I/O
- *    controller.
- *
- *    max_dev_sectors is a hard limit imposed by the storage device for
- *    READ/WRITE requests. It is set by the disk driver.
- *
- *    max_sectors is a soft limit imposed by the block layer for
- *    filesystem type requests.  This value can be overridden on a
- *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
- *    The soft limit can not exceed max_hw_sectors.
- **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-{
-	struct queue_limits *limits = &q->limits;
-	unsigned int max_sectors;
-
-	if ((max_hw_sectors << 9) < PAGE_SIZE) {
-		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
-		pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
-	}
-
-	max_hw_sectors = round_down(max_hw_sectors,
-				    limits->logical_block_size >> SECTOR_SHIFT);
-	limits->max_hw_sectors = max_hw_sectors;
-
-	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
-
-	if (limits->max_user_sectors)
-		max_sectors = min(max_sectors, limits->max_user_sectors);
-	else
-		max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);
-
-	max_sectors = round_down(max_sectors,
-				 limits->logical_block_size >> SECTOR_SHIFT);
-	limits->max_sectors = max_sectors;
-
-	if (!q->disk)
-		return;
-	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
-}
-EXPORT_SYMBOL(blk_queue_max_hw_sectors);
-
 /**
  * blk_queue_chunk_sectors - set size of the chunk for this queue
  * @q:  the request queue for the device
@@ -436,65 +370,6 @@ void blk_queue_max_zone_append_sectors(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
 
-/**
- * blk_queue_max_segments - set max hw segments for a request for this queue
- * @q:  the request queue for the device
- * @max_segments:  max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    hw data segments in a request.
- **/
-void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
-{
-	if (!max_segments) {
-		max_segments = 1;
-		pr_info("%s: set to minimum %u\n", __func__, max_segments);
-	}
-
-	q->limits.max_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_segments);
-
-/**
- * blk_queue_max_discard_segments - set max segments for discard requests
- * @q:  the request queue for the device
- * @max_segments:  max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    segments in a discard request.
- **/
-void blk_queue_max_discard_segments(struct request_queue *q,
-		unsigned short max_segments)
-{
-	q->limits.max_discard_segments = max_segments;
-}
-EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
-
-/**
- * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
- * @q:  the request queue for the device
- * @max_size:  max size of segment in bytes
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the size of a
- *    coalesced segment
- **/
-void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
-{
-	if (max_size < PAGE_SIZE) {
-		max_size = PAGE_SIZE;
-		pr_info("%s: set to minimum %u\n", __func__, max_size);
-	}
-
-	/* see blk_queue_virt_boundary() for the explanation */
-	WARN_ON_ONCE(q->limits.virt_boundary_mask);
-
-	q->limits.max_segment_size = max_size;
-}
-EXPORT_SYMBOL(blk_queue_max_segment_size);
-
 /**
  * blk_queue_logical_block_size - set logical block size for the queue
  * @q:  the request queue for the device
@@ -661,29 +536,6 @@ void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
 }
 EXPORT_SYMBOL(blk_limits_io_opt);
 
-/**
- * blk_queue_io_opt - set optimal request size for the queue
- * @q:	the request queue for the device
- * @opt:  optimal request size in bytes
- *
- * Description:
- *   Storage devices may report an optimal I/O size, which is the
- *   device's preferred unit for sustained I/O.  This is rarely reported
- *   for disk drives.  For RAID arrays it is usually the stripe width or
- *   the internal track size.  A properly aligned multiple of
- *   optimal_io_size is the preferred request size for workloads where
- *   sustained throughput is desired.
- */
-void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
-{
-	blk_limits_io_opt(&q->limits, opt);
-	if (!q->disk)
-		return;
-	q->disk->bdi->ra_pages =
-		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
-}
-EXPORT_SYMBOL(blk_queue_io_opt);
-
 static int queue_limit_alignment_offset(const struct queue_limits *lim,
 		sector_t sector)
 {
@@ -933,81 +785,6 @@ void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_pad);
 
-/**
- * blk_queue_segment_boundary - set boundary rules for segment merging
- * @q:  the request queue for the device
- * @mask:  the memory boundary mask
- **/
-void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
-{
-	if (mask < PAGE_SIZE - 1) {
-		mask = PAGE_SIZE - 1;
-		pr_info("%s: set to minimum %lx\n", __func__, mask);
-	}
-
-	q->limits.seg_boundary_mask = mask;
-}
-EXPORT_SYMBOL(blk_queue_segment_boundary);
-
-/**
- * blk_queue_virt_boundary - set boundary rules for bio merging
- * @q:  the request queue for the device
- * @mask:  the memory boundary mask
- **/
-void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
-{
-	q->limits.virt_boundary_mask = mask;
-
-	/*
-	 * Devices that require a virtual boundary do not support scatter/gather
-	 * I/O natively, but instead require a descriptor list entry for each
-	 * page (which might not be idential to the Linux PAGE_SIZE).  Because
-	 * of that they are not limited by our notion of "segment size".
-	 */
-	if (mask)
-		q->limits.max_segment_size = UINT_MAX;
-}
-EXPORT_SYMBOL(blk_queue_virt_boundary);
-
-/**
- * blk_queue_dma_alignment - set dma length and memory alignment
- * @q:     the request queue for the device
- * @mask:  alignment mask
- *
- * description:
- *    set required memory and length alignment for direct dma transactions.
- *    this is used when building direct io requests for the queue.
- *
- **/
-void blk_queue_dma_alignment(struct request_queue *q, int mask)
-{
-	q->limits.dma_alignment = mask;
-}
-EXPORT_SYMBOL(blk_queue_dma_alignment);
-
-/**
- * blk_queue_update_dma_alignment - update dma length and memory alignment
- * @q:     the request queue for the device
- * @mask:  alignment mask
- *
- * description:
- *    update required memory and length alignment for direct dma transactions.
- *    If the requested alignment is larger than the current alignment, then
- *    the current queue alignment is updated to the new value, otherwise it
- *    is left alone.  The design of this is to allow multiple objects
- *    (driver, device, transport etc) to set their respective
- *    alignments without having them interfere.
- *
- **/
-void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
-{
-	BUG_ON(mask > PAGE_SIZE);
-
-	if (mask > q->limits.dma_alignment)
-		q->limits.dma_alignment = mask;
-}
-EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-
 /**
  * blk_set_queue_depth - tell the block layer about the device queue depth
  * @q:  the request queue for the device
@@ -1061,28 +838,6 @@ void blk_queue_required_elevator_features(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
 
-/**
- * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
- * @q:		the request queue for the device
- * @dev:	the device pointer for dma
- *
- * Tell the block layer about merging the segments by dma map of @q.
- */
-bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
-				       struct device *dev)
-{
-	unsigned long boundary = dma_get_merge_boundary(dev);
-
-	if (!boundary)
-		return false;
-
-	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
-	blk_queue_virt_boundary(q, boundary);
-
-	return true;
-}
-EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
-
 /**
  * disk_set_zoned - inidicate a zoned device
  * @disk: gendisk to configure
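
All of the helpers removed above were thin wrappers that poked individual fields of struct queue_limits on an already-allocated queue. With the atomic queue-limits interface, a driver instead fills in a struct queue_limits and applies it in one validated step, for example via queue_limits_set() shown in the context of the first hunk. A minimal sketch of that replacement pattern, assuming a hypothetical driver function; the limit values are illustrative and not taken from any in-tree driver:

#include <linux/blkdev.h>

/* Hypothetical driver setup replacing the removed per-field setters. */
static int example_configure_queue(struct request_queue *q)
{
	struct queue_limits lim = {
		.max_hw_sectors		= 1024,		/* was blk_queue_max_hw_sectors() */
		.max_segments		= 128,		/* was blk_queue_max_segments() */
		.max_segment_size	= 65536,	/* was blk_queue_max_segment_size() */
		.seg_boundary_mask	= PAGE_SIZE - 1,/* was blk_queue_segment_boundary() */
		.dma_alignment		= 511,		/* was blk_queue_dma_alignment() */
	};

	/*
	 * Validate and publish all limits in one step; fields left at
	 * zero are filled in with the block layer's defaults when the
	 * limits are validated.
	 */
	return queue_limits_set(q, &lim);
}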

drivers/s390/block/dasd_eckd.c

Lines changed: 3 additions & 3 deletions
@@ -4561,9 +4561,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 	len_to_track_end = 0;
 	/*
 	 * A tidaw can address 4k of memory, but must not cross page boundaries
-	 * We can let the block layer handle this by setting
-	 * blk_queue_segment_boundary to page boundaries and
-	 * blk_max_segment_size to page size when setting up the request queue.
+	 * We can let the block layer handle this by setting seg_boundary_mask
+	 * to page boundaries and max_segment_size to page size when setting up
+	 * the request queue.
 	 * For write requests, a TIDAW must not cross track boundaries, because
 	 * we have to set the CBC flag on the last tidaw for each track.
 	 */

include/linux/blkdev.h

Lines changed: 0 additions & 13 deletions
@@ -908,15 +908,9 @@ static inline void queue_limits_cancel_update(struct request_queue *q)
 /*
  * Access functions for manipulating queue properties
  */
-void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
-extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_max_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_discard_segments(struct request_queue *,
-		unsigned short);
 void blk_queue_max_secure_erase_sectors(struct request_queue *q,
 		unsigned int max_sectors);
-extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
@@ -933,18 +927,13 @@ void disk_update_readahead(struct gendisk *disk);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
-extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
 extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
 void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
 		sector_t offset, const char *pfx);
 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
-extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_dma_alignment(struct request_queue *, int);
-extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
 
@@ -961,8 +950,6 @@ void disk_set_independent_access_ranges(struct gendisk *disk,
 
 extern void blk_queue_required_elevator_features(struct request_queue *q,
 					     unsigned int features);
-extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
-					      struct device *dev);
 
 bool __must_check blk_get_queue(struct request_queue *);
 extern void blk_put_queue(struct request_queue *);
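
The blk_queue_can_use_dma_map_merging() declaration removed here is the counterpart of the helper deleted from blk-settings.c above: it queried dma_get_merge_boundary() and set the queue's virtual boundary. Under the limits-based interface a driver can fold that same check into the queue_limits it is about to apply. A hedged sketch; the function name is made up for illustration, and it assumes the caller later applies the limits via queue_limits_set() or the update helpers referenced in the hunk context:

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical replacement for blk_queue_can_use_dma_map_merging():
 * record the IOMMU merge boundary in the limits the driver is building
 * instead of modifying the request queue after the fact.
 */
static bool example_use_dma_map_merging(struct queue_limits *lim,
					struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* a non-zero virt_boundary_mask implies an unlimited segment size */
	lim->virt_boundary_mask = boundary;
	return true;
}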

include/linux/mmc/host.h

Lines changed: 2 additions & 2 deletions
@@ -433,8 +433,8 @@ struct mmc_host {
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
 	/* host specific block data */
-	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
-	unsigned short		max_segs;	/* see blk_queue_max_segments */
+	unsigned int		max_seg_size;	/* lim->max_segment_size */
+	unsigned short		max_segs;	/* lim->max_segments */
 	unsigned short		unused;
 	unsigned int		max_req_size;	/* maximum number of bytes in one req */
 	unsigned int		max_blk_size;	/* maximum size of one mmc block */
