@@ -590,7 +590,9 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
 	io = container_of(tio, struct dm_io, tio);
 	io->magic = DM_IO_MAGIC;
 	io->status = BLK_STS_OK;
-	atomic_set(&io->io_count, 1);
+
+	/* one ref is for submission, the other is for completion */
+	atomic_set(&io->io_count, 2);
 	this_cpu_inc(*md->pending_io);
 	io->orig_bio = bio;
 	io->md = md;
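The hunk above replaces the old scheme of starting io_count at 1 and incrementing it once per clone: alloc_io() now hands out two references up front, one owned by the submitting context and one by the completion path. A minimal userspace sketch of that pattern, using C11 atomics in place of the kernel's atomic_t; struct io, io_alloc() and io_put() are made-up names for illustration, not dm code:

#include <stdatomic.h>
#include <stdlib.h>

struct io {
	atomic_int io_count;
	/* ... payload: bio pointer, status, ... */
};

static struct io *io_alloc(void)
{
	struct io *io = malloc(sizeof(*io));

	/* one ref is for submission, the other is for completion */
	atomic_init(&io->io_count, 2);
	return io;
}

static void io_put(struct io *io)
{
	/* free only after both owners have dropped their reference */
	if (atomic_fetch_sub(&io->io_count, 1) == 1)
		free(io);
}

int main(void)
{
	struct io *io = io_alloc();

	io_put(io);	/* completion side */
	io_put(io);	/* submission side: the final put frees io */
	return 0;
}

The point of starting at 2 is that the submitter keeps the dm_io alive on its own reference, so clone completions can no longer free it mid-submission; that is what lets the later hunks delete dm_io_inc_pending() entirely.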
@@ -955,11 +957,6 @@ static void dm_io_complete(struct dm_io *io)
 	}
 }
 
-static void dm_io_inc_pending(struct dm_io *io)
-{
-	atomic_inc(&io->io_count);
-}
-
 /*
  * Decrements the number of outstanding ios that a bio has been
  * cloned into, completing the original io if necc.
@@ -1316,7 +1313,6 @@ static void __map_bio(struct bio *clone)
 	/*
 	 * Map the clone.
 	 */
-	dm_io_inc_pending(io);
 	tio->old_sector = clone->bi_iter.bi_sector;
 
 	if (static_branch_unlikely(&swap_bios_enabled) &&
@@ -1426,11 +1422,12 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
 	}
 }
 
-static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
 				  unsigned num_bios, unsigned *len)
 {
 	struct bio_list blist = BIO_EMPTY_LIST;
 	struct bio *clone;
+	int ret = 0;
 
 	switch (num_bios) {
 	case 0:
@@ -1440,16 +1437,20 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
 		setup_split_accounting(ci, *len);
 		clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
 		__map_bio(clone);
+		ret = 1;
 		break;
 	default:
 		/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
 		alloc_multiple_bios(&blist, ci, ti, num_bios);
 		while ((clone = bio_list_pop(&blist))) {
 			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
 			__map_bio(clone);
+			ret += 1;
 		}
 		break;
 	}
+
+	return ret;
 }
 
 static void __send_empty_flush(struct clone_info *ci)
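Because __send_duplicate_bios() may issue fewer bios than requested, it now returns the number actually sent, and callers reserve the worst case before issuing and give back the difference afterwards, so the count can never hit zero while clones are still being mapped. A hedged sketch of that reserve-then-reconcile idiom, again with userspace C11 atomics and illustrative names rather than dm code:

#include <stdatomic.h>

struct io { atomic_int io_count; };

/* Stand-in for issuing up to 'want' clones; returns how many were sent. */
static int issue_clones(int want)
{
	return want > 0 ? want - 1 : 0;	/* e.g. one clone was not issued */
}

/* Reserve worst-case references first, then return the unused ones,
 * mirroring the atomic_add()/atomic_sub() pair wrapped around
 * __send_duplicate_bios() in the hunks that follow. */
static void send_batch(struct io *io, int want)
{
	int sent;

	atomic_fetch_add(&io->io_count, want);
	sent = issue_clones(want);
	atomic_fetch_sub(&io->io_count, want - sent);
}

int main(void)
{
	struct io io;

	atomic_init(&io.io_count, 2);	/* two initial refs, as in alloc_io() */
	send_batch(&io, 3);		/* reserves 3, issues 2, returns 1 */
	/* counter is now 2 + 3 - 1 = 4: one per issued clone plus the
	 * original submission and completion references */
	return 0;
}

Reserving before issuing matters because an already-issued clone can complete at any time; if references were added after the fact, the counter could transiently drop to zero and complete the dm_io early.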
@@ -1470,8 +1471,19 @@ static void __send_empty_flush(struct clone_info *ci)
 	ci->sector_count = 0;
 	ci->io->tio.clone.bi_iter.bi_size = 0;
 
-	while ((ti = dm_table_get_target(ci->map, target_nr++)))
-		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
+	while ((ti = dm_table_get_target(ci->map, target_nr++))) {
+		int bios;
+
+		atomic_add(ti->num_flush_bios, &ci->io->io_count);
+		bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
+		atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
+	}
+
+	/*
+	 * alloc_io() takes one extra reference for submission, so the
+	 * reference won't reach 0 without the following subtraction
+	 */
+	atomic_sub(1, &ci->io->io_count);
 
 	bio_uninit(ci->bio);
 }
@@ -1480,11 +1492,18 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
 					unsigned num_bios)
 {
 	unsigned len;
+	int bios;
 
 	len = min_t(sector_t, ci->sector_count,
 		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
 
-	__send_duplicate_bios(ci, ti, num_bios, &len);
+	atomic_add(num_bios, &ci->io->io_count);
+	bios = __send_duplicate_bios(ci, ti, num_bios, &len);
+	/*
+	 * alloc_io() takes one extra reference for submission, so the
+	 * reference won't reach 0 without the following (+1) subtraction
+	 */
+	atomic_sub(num_bios - bios + 1, &ci->io->io_count);
 
 	ci->sector += len;
 	ci->sector_count -= len;
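To sanity-check the arithmetic in the two call sites above: the flush path gives back num_flush_bios - bios per target and then retires one of alloc_io()'s two initial references in a separate atomic_sub(1), while __send_changing_extent_only() folds that extra reference into a single atomic_sub(num_bios - bios + 1). A small self-contained trace of the latter (userspace C, illustrative numbers):

#include <assert.h>
#include <stdatomic.h>

int main(void)
{
	atomic_int io_count;
	int num_bios = 3, bios = 2;	/* e.g. one clone was not issued */

	atomic_init(&io_count, 2);	/* as set by alloc_io() */

	/* __send_changing_extent_only() style accounting */
	atomic_fetch_add(&io_count, num_bios);
	atomic_fetch_sub(&io_count, num_bios - bios + 1);

	/* the two issued clones complete, dropping one reference each */
	atomic_fetch_sub(&io_count, bios);

	/* exactly the submission reference remains; the final
	 * dm_io_dec_pending() in dm_split_and_process_bio() drops it */
	assert(atomic_load(&io_count) == 1);
	return 0;
}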
@@ -1669,9 +1688,15 @@ static void dm_split_and_process_bio(struct mapped_device *md,
 	 * Add every dm_io instance into the hlist_head which is stored in
 	 * bio->bi_private, so that dm_poll_bio can poll them all.
 	 */
-	if (error || !ci.submit_as_polled)
-		dm_io_dec_pending(ci.io, error);
-	else
+	if (error || !ci.submit_as_polled) {
+		/*
+		 * In case of submission failure, the extra reference for
+		 * submitting io isn't consumed yet
+		 */
+		if (error)
+			atomic_dec(&io->io_count);
+		dm_io_dec_pending(io, error);
+	} else
 		dm_queue_poll_io(bio, io);
 }
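If submission fails, no completion will ever consume the extra reference taken in alloc_io(), so the error branch above drops it explicitly before the usual dm_io_dec_pending(). A hedged userspace sketch of just that error path, with illustrative names; the real dm_io_dec_pending() also records the error and completes the original bio, which is elided here:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct io { atomic_int io_count; };

static void io_dec_pending(struct io *io)
{
	/* the last reference completes and frees the io */
	if (atomic_fetch_sub(&io->io_count, 1) == 1)
		free(io);
}

/* tail of dm_split_and_process_bio(), sketched: on error the
 * submission-side extra reference was never consumed, so drop it
 * by hand before the final put */
static void finish(struct io *io, bool error)
{
	if (error)
		atomic_fetch_sub(&io->io_count, 1);
	io_dec_pending(io);
}

int main(void)
{
	struct io *io = malloc(sizeof(*io));

	atomic_init(&io->io_count, 2);	/* as in alloc_io() */
	finish(io, true);	/* both references released; io is freed */
	return 0;
}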