@@ -84,7 +84,8 @@ struct clone_info {
 	struct dm_io *io;
 	sector_t sector;
 	unsigned sector_count;
-	bool submit_as_polled;
+	bool is_abnormal_io:1;
+	bool submit_as_polled:1;
 };
 
 #define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
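The new flag is declared as a single-bit bitfield next to submit_as_polled, so the two flags can share storage instead of each occupying a full bool. A minimal userspace sketch of the packing effect (hypothetical struct names, not kernel code; exact sizes depend on the ABI):

/*
 * Compare a packed pair of one-bit flags against two plain bools.
 * On common ABIs the packed variant fits in a single byte.
 */
#include <stdbool.h>
#include <stdio.h>

struct flags_packed {
	bool is_abnormal_io:1;
	bool submit_as_polled:1;
};

struct flags_plain {
	bool is_abnormal_io;
	bool submit_as_polled;
};

int main(void)
{
	printf("packed: %zu byte(s), plain: %zu byte(s)\n",
	       sizeof(struct flags_packed), sizeof(struct flags_plain));
	return 0;
}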
@@ -1491,21 +1492,24 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
 
 static bool is_abnormal_io(struct bio *bio)
 {
-	bool r = false;
+	unsigned int op = bio_op(bio);
 
-	switch (bio_op(bio)) {
-	case REQ_OP_DISCARD:
-	case REQ_OP_SECURE_ERASE:
-	case REQ_OP_WRITE_ZEROES:
-		r = true;
-		break;
+	if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
+		switch (op) {
+		case REQ_OP_DISCARD:
+		case REQ_OP_SECURE_ERASE:
+		case REQ_OP_WRITE_ZEROES:
+			return true;
+		default:
+			break;
+		}
 	}
 
-	return r;
+	return false;
 }
 
-static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
-				  blk_status_t *status)
+static blk_status_t __process_abnormal_io(struct clone_info *ci,
+					  struct dm_target *ti)
 {
 	unsigned num_bios = 0;
 
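The rewritten is_abnormal_io() screens out REQ_OP_READ, REQ_OP_WRITE and REQ_OP_FLUSH before consulting the switch, so the common operations return false without entering it. A standalone sketch of that shape (hypothetical demo enum, not the kernel's REQ_OP_* values):

/*
 * Classify an operation: the comparison chain handles the common case,
 * the switch only runs for the remaining, rarer ops.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_op {
	DEMO_READ, DEMO_WRITE, DEMO_FLUSH,
	DEMO_DISCARD, DEMO_SECURE_ERASE, DEMO_WRITE_ZEROES,
	DEMO_OTHER
};

static bool demo_is_abnormal(enum demo_op op)
{
	if (op != DEMO_READ && op != DEMO_WRITE && op != DEMO_FLUSH) {
		switch (op) {
		case DEMO_DISCARD:
		case DEMO_SECURE_ERASE:
		case DEMO_WRITE_ZEROES:
			return true;
		default:
			break;
		}
	}
	return false;
}

int main(void)
{
	printf("read: %d, discard: %d, other: %d\n",
	       demo_is_abnormal(DEMO_READ),
	       demo_is_abnormal(DEMO_DISCARD),
	       demo_is_abnormal(DEMO_OTHER));
	return 0;
}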
@@ -1519,8 +1523,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
 	case REQ_OP_WRITE_ZEROES:
 		num_bios = ti->num_write_zeroes_bios;
 		break;
-	default:
-		return false;
 	}
 
 	/*
@@ -1530,12 +1532,10 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
 	 * check was performed.
 	 */
 	if (unlikely(!num_bios))
-		*status = BLK_STS_NOTSUPP;
-	else {
-		__send_changing_extent_only(ci, ti, num_bios);
-		*status = BLK_STS_OK;
-	}
-	return true;
+		return BLK_STS_NOTSUPP;
+
+	__send_changing_extent_only(ci, ti, num_bios);
+	return BLK_STS_OK;
 }
 
 /*
@@ -1588,11 +1588,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
 	struct bio *clone;
 	struct dm_target *ti;
 	unsigned len;
-	blk_status_t error = BLK_STS_IOERR;
 
 	ti = dm_table_find_target(ci->map, ci->sector);
-	if (unlikely(!ti || __process_abnormal_io(ci, ti, &error)))
-		return error;
+	if (unlikely(!ti))
+		return BLK_STS_IOERR;
+	else if (unlikely(ci->is_abnormal_io))
+		return __process_abnormal_io(ci, ti);
 
 	/*
 	 * Only support bio polling for normal IO, and the target io is
@@ -1612,11 +1613,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
 }
 
 static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
-			    struct dm_table *map, struct bio *bio)
+			    struct dm_table *map, struct bio *bio, bool is_abnormal)
 {
 	ci->map = map;
 	ci->io = alloc_io(md, bio);
 	ci->bio = bio;
+	ci->is_abnormal_io = is_abnormal;
 	ci->submit_as_polled = false;
 	ci->sector = bio->bi_iter.bi_sector;
 	ci->sector_count = bio_sectors(bio);
@@ -1636,8 +1638,18 @@ static void dm_split_and_process_bio(struct mapped_device *md,
 	struct clone_info ci;
 	struct dm_io *io;
 	blk_status_t error = BLK_STS_OK;
+	bool is_abnormal;
 
-	init_clone_info(&ci, md, map, bio);
+	is_abnormal = is_abnormal_io(bio);
+	if (unlikely(is_abnormal)) {
+		/*
+		 * Use blk_queue_split() for abnormal IO (e.g. discard, etc)
+		 * otherwise associated queue_limits won't be imposed.
+		 */
+		blk_queue_split(&bio);
+	}
+
+	init_clone_info(&ci, md, map, bio, is_abnormal);
 	io = ci.io;
 
 	if (bio->bi_opf & REQ_PREFLUSH) {
@@ -1697,13 +1709,6 @@ static void dm_submit_bio(struct bio *bio)
 		goto out;
 	}
 
-	/*
-	 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
-	 * otherwise associated queue_limits won't be imposed.
-	 */
-	if (unlikely(is_abnormal_io(bio)))
-		blk_queue_split(&bio);
-
 	dm_split_and_process_bio(md, map, bio);
 out:
 	dm_put_live_table_bio(md, srcu_idx, bio);