
Commit ec21163

Ming Lei authored and Mike Snitzer committed
dm: put all polled dm_io instances into a single list
Now that bio_split() isn't used by DM's bio splitting, it is a bit overkill to link dm_io into an hlist given there is only a single dm_io in the list. Convert to using a single list for holding all dm_io instances associated with this bio.

Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Mike Snitzer <[email protected]>
1 parent 0f14d60 commit ec21163
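
The data-structure change is small: an hlist_node embeds two pointers (next plus a pprev back-pointer), while the new scheme needs only one forward pointer per dm_io, because the list is only ever pushed at the head and walked front to back. A minimal standalone C sketch of that pattern, using a toy struct io rather than the kernel's dm_io:

#include <stdio.h>

/* Toy stand-in for dm_io: one forward link is all the poll list needs. */
struct io {
	int id;
	struct io *next;
};

/* Push onto the head of the list, as dm_queue_poll_io() now does. */
static void push(struct io **head, struct io *io)
{
	io->next = *head;
	*head = io;
}

int main(void)
{
	struct io a = { .id = 1 }, b = { .id = 2 };
	struct io *head = NULL;

	push(&head, &a);
	push(&head, &b);

	for (struct io *it = head; it; it = it->next)
		printf("io %d\n", it->id);	/* prints 2, then 1 */
	return 0;
}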

File tree

2 files changed: +28 -26 lines changed

drivers/md/dm-core.h

Lines changed: 1 addition & 1 deletion

@@ -259,7 +259,7 @@ struct dm_io {
 	spinlock_t lock;
 	unsigned long start_time;
 	void *data;
-	struct hlist_node node;
+	struct dm_io *next;
 	struct task_struct *map_task;
 	struct dm_stats_aux stats_aux;

drivers/md/dm.c

Lines changed: 27 additions & 25 deletions

@@ -1559,44 +1559,45 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
 }
 
 /*
- * Reuse ->bi_private as hlist head for storing all dm_io instances
+ * Reuse ->bi_private as dm_io list head for storing all dm_io instances
  * associated with this bio, and this bio's bi_private needs to be
  * stored in dm_io->data before the reuse.
  *
  * bio->bi_private is owned by fs or upper layer, so block layer won't
  * touch it after splitting. Meantime it won't be changed by anyone after
  * bio is submitted. So this reuse is safe.
  */
-static inline struct hlist_head *dm_get_bio_hlist_head(struct bio *bio)
+static inline struct dm_io **dm_poll_list_head(struct bio *bio)
 {
-	return (struct hlist_head *)&bio->bi_private;
+	return (struct dm_io **)&bio->bi_private;
 }
 
 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
 {
-	struct hlist_head *head = dm_get_bio_hlist_head(bio);
+	struct dm_io **head = dm_poll_list_head(bio);
 
 	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
 		bio->bi_opf |= REQ_DM_POLL_LIST;
 		/*
 		 * Save .bi_private into dm_io, so that we can reuse
-		 * .bi_private as hlist head for storing dm_io list
+		 * .bi_private as dm_io list head for storing dm_io list
 		 */
 		io->data = bio->bi_private;
 
-		INIT_HLIST_HEAD(head);
-
 		/* tell block layer to poll for completion */
 		bio->bi_cookie = ~BLK_QC_T_NONE;
+
+		io->next = NULL;
 	} else {
 		/*
 		 * bio recursed due to split, reuse original poll list,
 		 * and save bio->bi_private too.
 		 */
-		io->data = hlist_entry(head->first, struct dm_io, node)->data;
+		io->data = (*head)->data;
+		io->next = *head;
 	}
 
-	hlist_add_head(&io->node, head);
+	*head = io;
 }
 
 /*
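
The comment block above captures the trick this hunk relies on: bio->bi_private is a void *, so the field itself is wide enough to hold a struct dm_io * and its address can act as the head of an intrusive list, provided the owner's original value is parked in io->data first. A standalone sketch of both the first-submission and recursed-split cases (toy_bio and toy_io are illustrative stand-ins, not the kernel structs):

struct toy_bio { void *private; };			/* stands in for bio */
struct toy_io { void *data; struct toy_io *next; };	/* stands in for dm_io */

static struct toy_io **poll_list_head(struct toy_bio *bio)
{
	/* A void * slot can hold any object pointer, so its address can
	 * be reinterpreted as the head of a toy_io list. */
	return (struct toy_io **)&bio->private;
}

static void queue_poll_io(struct toy_bio *bio, struct toy_io *io, int first)
{
	struct toy_io **head = poll_list_head(bio);

	if (first) {
		io->data = bio->private;	/* park the owner's pointer */
		io->next = NULL;
	} else {
		io->data = (*head)->data;	/* every entry carries a copy */
		io->next = *head;
	}
	*head = io;				/* push to the head */
}

Copying data into every entry means whichever dm_io later sits at the head of the list can be used to restore bi_private.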
@@ -1685,8 +1686,8 @@ static void dm_split_and_process_bio(struct mapped_device *md,
 	 * Drop the extra reference count for non-POLLED bio, and hold one
 	 * reference for POLLED bio, which will be released in dm_poll_bio
 	 *
-	 * Add every dm_io instance into the hlist_head which is stored in
-	 * bio->bi_private, so that dm_poll_bio can poll them all.
+	 * Add every dm_io instance into the dm_io list head which is stored
+	 * in bio->bi_private, so that dm_poll_bio can poll them all.
 	 */
 	if (error || !ci.submit_as_polled) {
 		/*
@@ -1748,18 +1749,16 @@ static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
 		       unsigned int flags)
 {
-	struct hlist_head *head = dm_get_bio_hlist_head(bio);
-	struct hlist_head tmp = HLIST_HEAD_INIT;
-	struct hlist_node *next;
-	struct dm_io *io;
+	struct dm_io **head = dm_poll_list_head(bio);
+	struct dm_io *list = *head;
+	struct dm_io *tmp = NULL;
+	struct dm_io *curr, *next;
 
 	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
 	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
 		return 0;
 
-	WARN_ON_ONCE(hlist_empty(head));
-
-	hlist_move_list(head, &tmp);
+	WARN_ON_ONCE(!list);
 
 	/*
 	 * Restore .bi_private before possibly completing dm_io.
@@ -1770,24 +1769,27 @@ static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
 	 * clearing REQ_DM_POLL_LIST here.
 	 */
 	bio->bi_opf &= ~REQ_DM_POLL_LIST;
-	bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;
+	bio->bi_private = list->data;
 
-	hlist_for_each_entry_safe(io, next, &tmp, node) {
-		if (dm_poll_dm_io(io, iob, flags)) {
-			hlist_del_init(&io->node);
+	for (curr = list, next = curr->next; curr; curr = next, next =
+			curr ? curr->next : NULL) {
+		if (dm_poll_dm_io(curr, iob, flags)) {
 			/*
 			 * clone_endio() has already occurred, so no
 			 * error handling is needed here.
 			 */
-			__dm_io_dec_pending(io);
+			__dm_io_dec_pending(curr);
+		} else {
+			curr->next = tmp;
+			tmp = curr;
 		}
 	}
 
 	/* Not done? */
-	if (!hlist_empty(&tmp)) {
+	if (tmp) {
 		bio->bi_opf |= REQ_DM_POLL_LIST;
 		/* Reset bio->bi_private to dm_io list head */
-		hlist_move_list(&tmp, head);
+		*head = tmp;
 		return 0;
 	}
 	return 1;
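
The rewritten loop replaces hlist_for_each_entry_safe() with a hand-rolled safe walk: next is sampled before curr is visited, so a completed entry can be released mid-walk, and entries still in flight are re-linked onto tmp, which becomes the new list head. A standalone sketch of the pattern (toy_io and poll_all are illustrative, not the kernel code):

struct toy_io { int done; struct toy_io *next; };	/* stand-in for dm_io */

/* Returns 1 once every entry has completed, 0 while some remain queued. */
static int poll_all(struct toy_io **head)
{
	struct toy_io *tmp = NULL;
	struct toy_io *curr, *next;

	for (curr = *head, next = curr ? curr->next : NULL; curr;
	     curr = next, next = curr ? curr->next : NULL) {
		if (curr->done) {
			/* completed: the reference would be dropped here,
			 * as __dm_io_dec_pending() does in the real loop */
		} else {
			curr->next = tmp;	/* keep for the next poll */
			tmp = curr;
		}
	}

	*head = tmp;	/* pending entries form the new list */
	return tmp == NULL;
}

Re-linking reverses the order of the pending entries on every poll, which is harmless here: dm_poll_bio() only needs to visit each outstanding dm_io, not preserve ordering.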
