@@ -975,7 +975,7 @@ static void clone_endio(struct bio *bio)
 	dm_endio_fn endio = tio->ti->type->end_io;
 	struct bio *orig_bio = io->orig_bio;
 
-	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
+	if (unlikely(error == BLK_STS_TARGET)) {
 		if (bio_op(bio) == REQ_OP_DISCARD &&
 		    !bio->bi_disk->queue->limits.max_discard_sectors)
 			disable_discard(md);
@@ -1626,45 +1626,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 	return ret;
 }
 
-/*
- * Optimized variant of __split_and_process_bio that leverages the
- * fact that targets that use it do _not_ have a need to split bios.
- */
-static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
-			      struct bio *bio)
-{
-	struct clone_info ci;
-	blk_qc_t ret = BLK_QC_T_NONE;
-	int error = 0;
-
-	init_clone_info(&ci, md, map, bio);
-
-	if (bio->bi_opf & REQ_PREFLUSH) {
-		error = __send_empty_flush(&ci);
-		/* dec_pending submits any data associated with flush */
-	} else {
-		struct dm_target_io *tio;
-		struct dm_target *ti = md->immutable_target;
-
-		if (WARN_ON_ONCE(!ti)) {
-			error = -EIO;
-			goto out;
-		}
-
-		ci.bio = bio;
-		ci.sector_count = bio_sectors(bio);
-		if (__process_abnormal_io(&ci, ti, &error))
-			goto out;
-
-		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
-		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
-	}
-out:
-	/* drop the extra reference count */
-	dec_pending(ci.io, errno_to_blk_status(error));
-	return ret;
-}
-
 static blk_qc_t dm_submit_bio(struct bio *bio)
 {
 	struct mapped_device *md = bio->bi_disk->private_data;
@@ -1710,10 +1671,7 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
 	if (is_abnormal_io(bio))
 		blk_queue_split(&bio);
 
-	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
-		ret = __process_bio(md, map, bio);
-	else
-		ret = __split_and_process_bio(md, map, bio);
+	ret = __split_and_process_bio(md, map, bio);
 out:
 	dm_put_live_table(md, srcu_idx);
 	return ret;
@@ -2038,11 +1996,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 	if (request_based)
 		dm_stop_queue(q);
 
-	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
+	if (request_based) {
 		/*
-		 * Leverage the fact that request-based DM targets and
-		 * NVMe bio based targets are immutable singletons
-		 * - used to optimize both __process_bio and dm_mq_queue_rq
+		 * Leverage the fact that request-based DM targets are
+		 * immutable singletons - used to optimize dm_mq_queue_rq.
 		 */
 		md->immutable_target = dm_table_get_immutable_target(t);
 	}
@@ -2164,7 +2121,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 		break;
 	case DM_TYPE_BIO_BASED:
 	case DM_TYPE_DAX_BIO_BASED:
-	case DM_TYPE_NVME_BIO_BASED:
 		break;
 	case DM_TYPE_NONE:
 		WARN_ON_ONCE(true);
@@ -2922,7 +2878,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
 	switch (type) {
 	case DM_TYPE_BIO_BASED:
 	case DM_TYPE_DAX_BIO_BASED:
-	case DM_TYPE_NVME_BIO_BASED:
 		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
 		io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);