@@ -1412,14 +1412,56 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1412
1412
submit_bio_noacct (read_bio );
1413
1413
}
1414
1414
1415
/*
 * Wait until no member rdev of this array is in a state that forbids writing
 * the given bio: either explicitly Blocked, or carrying an unacknowledged bad
 * block overlapping the bio's sector range.
 *
 * Returns true once every rdev is writable for this bio.  Returns false
 * without sleeping when the bio has REQ_NOWAIT set and a wait would have
 * been required (caller is expected to fail the bio with -EWOULDBLOCK).
 *
 * After any sleep the scan restarts from disk 0 (goto retry), because the
 * mirror configuration may have changed while we were blocked.
 */
static bool wait_blocked_rdev(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	/* raid_disks * 2: scan both the primary slots and replacement slots */
	int disks = conf->raid_disks * 2;
	int i;

retry:
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;

		if (!rdev)
			continue;

		if (test_bit(Blocked, &rdev->flags)) {
			if (bio->bi_opf & REQ_NOWAIT)
				return false;

			mddev_add_trace_msg(rdev->mddev, "raid1 wait rdev %d blocked",
					    rdev->raid_disk);
			/*
			 * Pin the rdev before sleeping; md_wait_for_blocked_rdev()
			 * presumably drops this nr_pending reference when it
			 * returns — no matching rdev_dec_pending() here (TODO
			 * confirm against md_wait_for_blocked_rdev()).
			 */
			atomic_inc(&rdev->nr_pending);
			md_wait_for_blocked_rdev(rdev, rdev->mddev);
			goto retry;
		}

		/* don't write here until the bad block is acknowledged */
		if (test_bit(WriteErrorSeen, &rdev->flags) &&
		    rdev_has_badblock(rdev, bio->bi_iter.bi_sector,
				      bio_sectors(bio)) < 0) {
			if (bio->bi_opf & REQ_NOWAIT)
				return false;

			/* mark the rdev so the acknowledgement path unblocks us */
			set_bit(BlockedBadBlocks, &rdev->flags);
			mddev_add_trace_msg(rdev->mddev, "raid1 wait rdev %d blocked",
					    rdev->raid_disk);
			/* same pin-then-wait pattern as the Blocked case above */
			atomic_inc(&rdev->nr_pending);
			md_wait_for_blocked_rdev(rdev, rdev->mddev);
			goto retry;
		}
	}

	return true;
}
1457
+
1415
1458
static void raid1_write_request (struct mddev * mddev , struct bio * bio ,
1416
1459
int max_write_sectors )
1417
1460
{
1418
1461
struct r1conf * conf = mddev -> private ;
1419
1462
struct r1bio * r1_bio ;
1420
1463
int i , disks ;
1421
1464
unsigned long flags ;
1422
- struct md_rdev * blocked_rdev ;
1423
1465
int first_clone ;
1424
1466
int max_sectors ;
1425
1467
bool write_behind = false;
@@ -1457,7 +1499,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1457
1499
return ;
1458
1500
}
1459
1501
1460
- retry_write :
1502
+ if (!wait_blocked_rdev (mddev , bio )) {
1503
+ bio_wouldblock_error (bio );
1504
+ return ;
1505
+ }
1506
+
1461
1507
r1_bio = alloc_r1bio (mddev , bio );
1462
1508
r1_bio -> sectors = max_write_sectors ;
1463
1509
@@ -1473,7 +1519,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1473
1519
*/
1474
1520
1475
1521
disks = conf -> raid_disks * 2 ;
1476
- blocked_rdev = NULL ;
1477
1522
max_sectors = r1_bio -> sectors ;
1478
1523
for (i = 0 ; i < disks ; i ++ ) {
1479
1524
struct md_rdev * rdev = conf -> mirrors [i ].rdev ;
@@ -1486,11 +1531,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1486
1531
if (!is_discard && rdev && test_bit (WriteMostly , & rdev -> flags ))
1487
1532
write_behind = true;
1488
1533
1489
- if (rdev && unlikely (test_bit (Blocked , & rdev -> flags ))) {
1490
- atomic_inc (& rdev -> nr_pending );
1491
- blocked_rdev = rdev ;
1492
- break ;
1493
- }
1494
1534
r1_bio -> bios [i ] = NULL ;
1495
1535
if (!rdev || test_bit (Faulty , & rdev -> flags )) {
1496
1536
if (i < conf -> raid_disks )
@@ -1506,13 +1546,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1506
1546
1507
1547
is_bad = is_badblock (rdev , r1_bio -> sector , max_sectors ,
1508
1548
& first_bad , & bad_sectors );
1509
- if (is_bad < 0 ) {
1510
- /* mustn't write here until the bad block is
1511
- * acknowledged*/
1512
- set_bit (BlockedBadBlocks , & rdev -> flags );
1513
- blocked_rdev = rdev ;
1514
- break ;
1515
- }
1516
1549
if (is_bad && first_bad <= r1_bio -> sector ) {
1517
1550
/* Cannot write here at all */
1518
1551
bad_sectors -= (r1_bio -> sector - first_bad );
@@ -1543,27 +1576,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1543
1576
r1_bio -> bios [i ] = bio ;
1544
1577
}
1545
1578
1546
- if (unlikely (blocked_rdev )) {
1547
- /* Wait for this device to become unblocked */
1548
- int j ;
1549
-
1550
- for (j = 0 ; j < i ; j ++ )
1551
- if (r1_bio -> bios [j ])
1552
- rdev_dec_pending (conf -> mirrors [j ].rdev , mddev );
1553
- mempool_free (r1_bio , & conf -> r1bio_pool );
1554
- allow_barrier (conf , bio -> bi_iter .bi_sector );
1555
-
1556
- if (bio -> bi_opf & REQ_NOWAIT ) {
1557
- bio_wouldblock_error (bio );
1558
- return ;
1559
- }
1560
- mddev_add_trace_msg (mddev , "raid1 wait rdev %d blocked" ,
1561
- blocked_rdev -> raid_disk );
1562
- md_wait_for_blocked_rdev (blocked_rdev , mddev );
1563
- wait_barrier (conf , bio -> bi_iter .bi_sector , false);
1564
- goto retry_write ;
1565
- }
1566
-
1567
1579
/*
1568
1580
* When using a bitmap, we may call alloc_behind_master_bio below.
1569
1581
* alloc_behind_master_bio allocates a copy of the data payload a page
0 commit comments