@@ -1412,14 +1412,56 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 	submit_bio_noacct(read_bio);
 }
 
+static bool wait_blocked_rdev(struct mddev *mddev, struct bio *bio)
+{
+	struct r1conf *conf = mddev->private;
+	int disks = conf->raid_disks * 2;
+	int i;
+
+retry:
+	for (i = 0; i < disks; i++) {
+		struct md_rdev *rdev = conf->mirrors[i].rdev;
+
+		if (!rdev)
+			continue;
+
+		if (test_bit(Blocked, &rdev->flags)) {
+			if (bio->bi_opf & REQ_NOWAIT)
+				return false;
+
+			mddev_add_trace_msg(rdev->mddev, "raid1 wait rdev %d blocked",
+					    rdev->raid_disk);
+			atomic_inc(&rdev->nr_pending);
+			md_wait_for_blocked_rdev(rdev, rdev->mddev);
+			goto retry;
+		}
+
+		/* don't write here until the bad block is acknowledged */
+		if (test_bit(WriteErrorSeen, &rdev->flags) &&
+		    rdev_has_badblock(rdev, bio->bi_iter.bi_sector,
+				      bio_sectors(bio)) < 0) {
+			if (bio->bi_opf & REQ_NOWAIT)
+				return false;
+
+			set_bit(BlockedBadBlocks, &rdev->flags);
+			mddev_add_trace_msg(rdev->mddev, "raid1 wait rdev %d blocked",
+					    rdev->raid_disk);
+			atomic_inc(&rdev->nr_pending);
+			md_wait_for_blocked_rdev(rdev, rdev->mddev);
+			goto retry;
+		}
+	}
+
+	return true;
+}
+
 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 				int max_write_sectors)
 {
 	struct r1conf *conf = mddev->private;
 	struct r1bio *r1_bio;
 	int i, disks;
 	unsigned long flags;
-	struct md_rdev *blocked_rdev;
 	int first_clone;
 	int max_sectors;
 	bool write_behind = false;
@@ -1457,7 +1499,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		return;
 	}
 
- retry_write:
+	if (!wait_blocked_rdev(mddev, bio)) {
+		bio_wouldblock_error(bio);
+		return;
+	}
+
 	r1_bio = alloc_r1bio(mddev, bio);
 	r1_bio->sectors = max_write_sectors;
 
@@ -1473,7 +1519,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	 */
 
 	disks = conf->raid_disks * 2;
-	blocked_rdev = NULL;
 	max_sectors = r1_bio->sectors;
 	for (i = 0; i < disks; i++) {
 		struct md_rdev *rdev = conf->mirrors[i].rdev;
@@ -1486,11 +1531,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		if (!is_discard && rdev && test_bit(WriteMostly, &rdev->flags))
 			write_behind = true;
 
-		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
-			atomic_inc(&rdev->nr_pending);
-			blocked_rdev = rdev;
-			break;
-		}
 		r1_bio->bios[i] = NULL;
 		if (!rdev || test_bit(Faulty, &rdev->flags)) {
 			if (i < conf->raid_disks)
@@ -1506,13 +1546,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
 		is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
 				     &first_bad, &bad_sectors);
-		if (is_bad < 0) {
-			/* mustn't write here until the bad block is
-			 * acknowledged*/
-			set_bit(BlockedBadBlocks, &rdev->flags);
-			blocked_rdev = rdev;
-			break;
-		}
 		if (is_bad && first_bad <= r1_bio->sector) {
 			/* Cannot write here at all */
 			bad_sectors -= (r1_bio->sector - first_bad);
@@ -1543,27 +1576,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		r1_bio->bios[i] = bio;
 	}
 
-	if (unlikely(blocked_rdev)) {
-		/* Wait for this device to become unblocked */
-		int j;
-
-		for (j = 0; j < i; j++)
-			if (r1_bio->bios[j])
-				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
-		mempool_free(r1_bio, &conf->r1bio_pool);
-		allow_barrier(conf, bio->bi_iter.bi_sector);
-
-		if (bio->bi_opf & REQ_NOWAIT) {
-			bio_wouldblock_error(bio);
-			return;
-		}
-		mddev_add_trace_msg(mddev, "raid1 wait rdev %d blocked",
-				    blocked_rdev->raid_disk);
-		md_wait_for_blocked_rdev(blocked_rdev, mddev);
-		wait_barrier(conf, bio->bi_iter.bi_sector, false);
-		goto retry_write;
-	}
-
 	/*
 	 * When using a bitmap, we may call alloc_behind_master_bio below.
 	 * alloc_behind_master_bio allocates a copy of the data payload a page