@@ -1559,44 +1559,45 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
 }
 
 /*
- * Reuse ->bi_private as hlist head for storing all dm_io instances
+ * Reuse ->bi_private as dm_io list head for storing all dm_io instances
  * associated with this bio, and this bio's bi_private needs to be
  * stored in dm_io->data before the reuse.
  *
  * bio->bi_private is owned by fs or upper layer, so block layer won't
  * touch it after splitting. Meantime it won't be changed by anyone after
  * bio is submitted. So this reuse is safe.
  */
-static inline struct hlist_head *dm_get_bio_hlist_head(struct bio *bio)
+static inline struct dm_io **dm_poll_list_head(struct bio *bio)
 {
-	return (struct hlist_head *)&bio->bi_private;
+	return (struct dm_io **)&bio->bi_private;
 }
 
 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
 {
-	struct hlist_head *head = dm_get_bio_hlist_head(bio);
+	struct dm_io **head = dm_poll_list_head(bio);
 
 	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
 		bio->bi_opf |= REQ_DM_POLL_LIST;
 		/*
 		 * Save .bi_private into dm_io, so that we can reuse
-		 * .bi_private as hlist head for storing dm_io list
+		 * .bi_private as dm_io list head for storing dm_io list
 		 */
 		io->data = bio->bi_private;
 
-		INIT_HLIST_HEAD(head);
-
 		/* tell block layer to poll for completion */
 		bio->bi_cookie = ~BLK_QC_T_NONE;
+
+		io->next = NULL;
 	} else {
 		/*
 		 * bio recursed due to split, reuse original poll list,
 		 * and save bio->bi_private too.
 		 */
-		io->data = hlist_entry(head->first, struct dm_io, node)->data;
+		io->data = (*head)->data;
+		io->next = *head;
 	}
 
-	hlist_add_head(&io->node, head);
+	*head = io;
 }
 
 /*
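The hunk above replaces the hlist with a bare struct dm_io pointer stored directly in the pointer-sized bi_private slot: the first queued dm_io saves the original bi_private and starts the list, and later dm_io instances inherit that saved value and are pushed to the front, so neither INIT_HLIST_HEAD() nor an hlist node is needed. The standalone sketch below illustrates that push-to-head pattern; fake_bio, fake_io, FAKE_POLL_LIST and the helper names are illustrative stand-ins, not the kernel's definitions.

/*
 * Minimal sketch (not the kernel code) of the pattern used by
 * dm_queue_poll_io above: a reused pointer field serves as the head of
 * an intrusive singly-linked list, and the field's original value is
 * saved in the first node before the reuse.
 */
#include <stddef.h>

struct fake_io {
	struct fake_io *next;	/* plays the role of dm_io->next */
	void *data;		/* plays the role of dm_io->data */
};

struct fake_bio {
	void *bi_private;	/* owned by the upper layer; reused as list head */
	unsigned int bi_opf;
};

#define FAKE_POLL_LIST	(1u << 0)	/* stand-in for REQ_DM_POLL_LIST */

static struct fake_io **poll_list_head(struct fake_bio *bio)
{
	/* reinterpret the pointer slot as "pointer to first fake_io" */
	return (struct fake_io **)&bio->bi_private;
}

static void queue_poll_io(struct fake_bio *bio, struct fake_io *io)
{
	struct fake_io **head = poll_list_head(bio);

	if (!(bio->bi_opf & FAKE_POLL_LIST)) {
		/* first node: stash the upper layer's bi_private, start a fresh list */
		bio->bi_opf |= FAKE_POLL_LIST;
		io->data = bio->bi_private;
		io->next = NULL;
	} else {
		/* later nodes: inherit the saved bi_private, link in front */
		io->data = (*head)->data;
		io->next = *head;
	}
	*head = io;	/* push to head; bi_private now points at the newest io */
}

A plain dm_io pointer is the same size as the hlist_head it replaces, so it still fits in the single pointer-wide bi_private slot, and the empty case no longer needs any explicit initialization.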
@@ -1685,8 +1686,8 @@ static void dm_split_and_process_bio(struct mapped_device *md,
 	 * Drop the extra reference count for non-POLLED bio, and hold one
 	 * reference for POLLED bio, which will be released in dm_poll_bio
 	 *
-	 * Add every dm_io instance into the hlist_head which is stored in
-	 * bio->bi_private, so that dm_poll_bio can poll them all.
+	 * Add every dm_io instance into the dm_io list head which is stored
+	 * in bio->bi_private, so that dm_poll_bio can poll them all.
 	 */
 	if (error || !ci.submit_as_polled) {
 		/*
@@ -1748,18 +1749,16 @@ static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
 		       unsigned int flags)
 {
-	struct hlist_head *head = dm_get_bio_hlist_head(bio);
-	struct hlist_head tmp = HLIST_HEAD_INIT;
-	struct hlist_node *next;
-	struct dm_io *io;
+	struct dm_io **head = dm_poll_list_head(bio);
+	struct dm_io *list = *head;
+	struct dm_io *tmp = NULL;
+	struct dm_io *curr, *next;
 
 	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
 	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
 		return 0;
 
-	WARN_ON_ONCE(hlist_empty(head));
-
-	hlist_move_list(head, &tmp);
+	WARN_ON_ONCE(!list);
 
 	/*
 	 * Restore .bi_private before possibly completing dm_io.
@@ -1770,24 +1769,27 @@ static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
 	 * clearing REQ_DM_POLL_LIST here.
 	 */
 	bio->bi_opf &= ~REQ_DM_POLL_LIST;
-	bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;
+	bio->bi_private = list->data;
 
-	hlist_for_each_entry_safe(io, next, &tmp, node) {
-		if (dm_poll_dm_io(io, iob, flags)) {
-			hlist_del_init(&io->node);
+	for (curr = list, next = curr->next; curr; curr = next, next =
+			curr ? curr->next : NULL) {
+		if (dm_poll_dm_io(curr, iob, flags)) {
 			/*
 			 * clone_endio() has already occurred, so no
 			 * error handling is needed here.
 			 */
-			__dm_io_dec_pending(io);
+			__dm_io_dec_pending(curr);
+		} else {
+			curr->next = tmp;
+			tmp = curr;
 		}
 	}
 
 	/* Not done? */
-	if (!hlist_empty(&tmp)) {
+	if (tmp) {
 		bio->bi_opf |= REQ_DM_POLL_LIST;
 		/* Reset bio->bi_private to dm_io list head */
-		hlist_move_list(&tmp, head);
+		*head = tmp;
 		return 0;
 	}
 	return 1;
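For the polling side changed above, the sketch below shows the equivalent list walk with stand-in types and helpers (fake_io, is_done() and finish_io() are illustrative only): completed entries are dropped, while unfinished ones are re-threaded onto a temporary list that the caller stores back as the new head, mirroring how dm_poll_bio rebuilds the list in *head and re-sets REQ_DM_POLL_LIST when work remains.

/*
 * Minimal sketch (not the kernel code) of the walk in dm_poll_bio above.
 */
#include <stdbool.h>
#include <stddef.h>

struct fake_io {
	struct fake_io *next;
	void *data;
	bool done;
};

static bool is_done(struct fake_io *io)	/* stand-in for dm_poll_dm_io() */
{
	return io->done;
}

static void finish_io(struct fake_io *io)	/* stand-in for __dm_io_dec_pending() */
{
	(void)io;
}

/* Returns the list of still-pending entries (NULL if everything finished). */
static struct fake_io *poll_list(struct fake_io *list)
{
	struct fake_io *tmp = NULL;
	struct fake_io *curr, *next;

	/* 'next' is read up front because finish_io() may free curr */
	for (curr = list, next = curr ? curr->next : NULL; curr;
	     curr = next, next = curr ? curr->next : NULL) {
		if (is_done(curr)) {
			finish_io(curr);	/* entry is finished; drop it */
		} else {
			curr->next = tmp;	/* re-thread survivor onto tmp */
			tmp = curr;
		}
	}
	return tmp;	/* caller stores this back as the new list head */
}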