@@ -1572,10 +1572,12 @@ void bch2_ec_do_stripe_creates(struct bch_fs *c)
15721572 bch2_write_ref_put (c , BCH_WRITE_REF_stripe_create );
15731573}
15741574
1575- static void ec_stripe_set_pending (struct bch_fs * c , struct ec_stripe_head * h )
1575+ static void ec_stripe_new_set_pending (struct bch_fs * c , struct ec_stripe_head * h )
15761576{
15771577 struct ec_stripe_new * s = h -> s ;
15781578
1579+ lockdep_assert_held (& h -> lock );
1580+
15791581 BUG_ON (!s -> allocated && !s -> err );
15801582
15811583 h -> s = NULL ;
@@ -1588,6 +1590,12 @@ static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
15881590 ec_stripe_new_put (c , s , STRIPE_REF_io );
15891591}
15901592
/*
 * Cancel the stripe currently being built on @h: record @err on it and hand
 * it off via ec_stripe_new_set_pending(), which detaches it from the head
 * (h->s = NULL) and drops the io ref.
 *
 * Caller must hold h->lock — ec_stripe_new_set_pending() asserts this with
 * lockdep_assert_held().
 */
static void ec_stripe_new_cancel(struct bch_fs *c, struct ec_stripe_head *h, int err)
{
	h->s->err = err;
	ec_stripe_new_set_pending(c, h);
}
1598+
15911599void bch2_ec_bucket_cancel (struct bch_fs * c , struct open_bucket * ob )
15921600{
15931601 struct ec_stripe_new * s = ob -> ec ;
@@ -1711,27 +1719,14 @@ static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
17111719 return 0 ;
17121720}
17131721
1714- static struct ec_stripe_head *
1715- ec_new_stripe_head_alloc (struct bch_fs * c , unsigned disk_label ,
1716- unsigned algo , unsigned redundancy ,
1717- enum bch_watermark watermark )
1722+ static void ec_stripe_head_devs_update (struct bch_fs * c , struct ec_stripe_head * h )
17181723{
1719- struct ec_stripe_head * h ;
1720-
1721- h = kzalloc (sizeof (* h ), GFP_KERNEL );
1722- if (!h )
1723- return NULL ;
1724-
1725- mutex_init (& h -> lock );
1726- BUG_ON (!mutex_trylock (& h -> lock ));
1727-
1728- h -> disk_label = disk_label ;
1729- h -> algo = algo ;
1730- h -> redundancy = redundancy ;
1731- h -> watermark = watermark ;
1724+ struct bch_devs_mask devs = h -> devs ;
17321725
17331726 rcu_read_lock ();
1734- h -> devs = target_rw_devs (c , BCH_DATA_user , disk_label ? group_to_target (disk_label - 1 ) : 0 );
1727+ h -> devs = target_rw_devs (c , BCH_DATA_user , h -> disk_label
1728+ ? group_to_target (h -> disk_label - 1 )
1729+ : 0 );
17351730 unsigned nr_devs = dev_mask_nr (& h -> devs );
17361731
17371732 for_each_member_device_rcu (c , ca , & h -> devs )
@@ -1741,6 +1736,7 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned disk_label,
17411736
17421737 h -> blocksize = pick_blocksize (c , & h -> devs );
17431738
1739+ h -> nr_active_devs = 0 ;
17441740 for_each_member_device_rcu (c , ca , & h -> devs )
17451741 if (ca -> mi .bucket_size == h -> blocksize )
17461742 h -> nr_active_devs ++ ;
@@ -1751,7 +1747,9 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned disk_label,
17511747 * If we only have redundancy + 1 devices, we're better off with just
17521748 * replication:
17531749 */
1754- if (h -> nr_active_devs < h -> redundancy + 2 ) {
1750+ h -> insufficient_devs = h -> nr_active_devs < h -> redundancy + 2 ;
1751+
1752+ if (h -> insufficient_devs ) {
17551753 const char * err ;
17561754
17571755 if (nr_devs < h -> redundancy + 2 )
@@ -1766,6 +1764,34 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned disk_label,
17661764 h -> nr_active_devs , h -> redundancy + 2 , err );
17671765 }
17681766
1767+ struct bch_devs_mask devs_leaving ;
1768+ bitmap_andnot (devs_leaving .d , devs .d , h -> devs .d , BCH_SB_MEMBERS_MAX );
1769+
1770+ if (h -> s && !h -> s -> allocated && dev_mask_nr (& devs_leaving ))
1771+ ec_stripe_new_cancel (c , h , - EINTR );
1772+
1773+ h -> rw_devs_change_count = c -> rw_devs_change_count ;
1774+ }
1775+
/*
 * Allocate and initialize a stripe head for the given
 * (disk_label, algo, redundancy, watermark) combination and link it onto
 * c->ec_stripe_head_list.
 *
 * Returns the new head with h->lock already held (the trylock on a freshly
 * initialized mutex cannot fail; BUG_ON documents that invariant), or NULL
 * on allocation failure.
 *
 * NOTE(review): all other fields are left zeroed by kzalloc() —
 * rw_devs_change_count in particular, so the caller's staleness check
 * against c->rw_devs_change_count presumably triggers
 * ec_stripe_head_devs_update() on first use; confirm against
 * __bch2_ec_stripe_head_get().
 */
static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned disk_label,
			 unsigned algo, unsigned redundancy,
			 enum bch_watermark watermark)
{
	struct ec_stripe_head *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	mutex_init(&h->lock);
	BUG_ON(!mutex_trylock(&h->lock));

	h->disk_label	= disk_label;
	h->algo		= algo;
	h->redundancy	= redundancy;
	h->watermark	= watermark;

	list_add(&h->list, &c->ec_stripe_head_list);
	return h;
}
@@ -1776,7 +1802,7 @@ void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
17761802 h -> s -> allocated &&
17771803 bitmap_weight (h -> s -> blocks_allocated ,
17781804 h -> s -> nr_data ) == h -> s -> nr_data )
1779- ec_stripe_set_pending (c , h );
1805+ ec_stripe_new_set_pending (c , h );
17801806
17811807 mutex_unlock (& h -> lock );
17821808}
@@ -1801,7 +1827,7 @@ __bch2_ec_stripe_head_get(struct btree_trans *trans,
18011827
18021828 if (test_bit (BCH_FS_going_ro , & c -> flags )) {
18031829 h = ERR_PTR (- BCH_ERR_erofs_no_writes );
1804- goto found ;
1830+ goto err ;
18051831 }
18061832
18071833 list_for_each_entry (h , & c -> ec_stripe_head_list , list )
@@ -1810,18 +1836,23 @@ __bch2_ec_stripe_head_get(struct btree_trans *trans,
18101836 h -> redundancy == redundancy &&
18111837 h -> watermark == watermark ) {
18121838 ret = bch2_trans_mutex_lock (trans , & h -> lock );
1813- if (ret )
1839+ if (ret ) {
18141840 h = ERR_PTR (ret );
1841+ goto err ;
1842+ }
18151843 goto found ;
18161844 }
18171845
18181846 h = ec_new_stripe_head_alloc (c , disk_label , algo , redundancy , watermark );
18191847found :
1820- if (!IS_ERR_OR_NULL (h ) &&
1821- h -> nr_active_devs < h -> redundancy + 2 ) {
1848+ if (h -> rw_devs_change_count != c -> rw_devs_change_count )
1849+ ec_stripe_head_devs_update (c , h );
1850+
1851+ if (h -> insufficient_devs ) {
18221852 mutex_unlock (& h -> lock );
18231853 h = NULL ;
18241854 }
1855+ err :
18251856 mutex_unlock (& c -> ec_stripe_head_lock );
18261857 return h ;
18271858}
@@ -2261,8 +2292,7 @@ static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
22612292 }
22622293 goto unlock ;
22632294found :
2264- h -> s -> err = - BCH_ERR_erofs_no_writes ;
2265- ec_stripe_set_pending (c , h );
2295+ ec_stripe_new_cancel (c , h , - BCH_ERR_erofs_no_writes );
22662296unlock :
22672297 mutex_unlock (& h -> lock );
22682298 }
0 commit comments