@@ -407,9 +407,9 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
 	BUG_ON(c->opts.nochanges);
 
 	bkey_for_each_ptr(ptrs, ptr) {
-		BUG_ON(!bch2_dev_exists(c, ptr->dev));
-
-		struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
+		struct bch_dev *ca = nocow
+			? bch2_dev_have_ref(c, ptr->dev)
+			: bch2_dev_get_ioref2(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);
 
 		if (to_entry(ptr + 1) < ptrs.end) {
 			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set));
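Context for the hunk above: the old code looked the device up with bch2_dev_bkey_exists() after asserting it exists, then took the io ref in a separate step (see the next hunk). The new helpers fold lookup and ref acquisition together. A minimal sketch of the presumed contract, with a hypothetical lookup helper and treating io_ref as a percpu ref for illustration; not the tree's actual implementation:

static inline struct bch_dev *dev_get_ioref_sketch(struct bch_fs *c,
						   unsigned dev, int rw)
{
	struct bch_dev *ca = dev_lookup_sketch(c, dev);	/* hypothetical lookup */

	(void) rw;	/* READ vs WRITE side; unused in this simplified sketch */

	if (ca && percpu_ref_tryget(&ca->io_ref))
		return ca;	/* caller now owns an io ref */
	return NULL;		/* device absent or going away: caller must check */
}

bch2_dev_have_ref(), by contrast, is presumably for the nocow case where a reference is already held, so it can return the device without a tryget and without a NULL check; the next hunk then records ownership as a single boolean.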
@@ -429,8 +429,7 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
 
 		n->c			= c;
 		n->dev			= ptr->dev;
-		n->have_ioref		= nocow || bch2_dev_get_ioref(ca,
-					type == BCH_DATA_btree ? READ : WRITE);
+		n->have_ioref		= ca != NULL;
 		n->nocow		= nocow;
 		n->submit_time		= local_clock();
 		n->inode_offset		= bkey_start_offset(&k->k);
@@ -650,7 +649,9 @@ static void bch2_write_endio(struct bio *bio)
 	struct bch_write_bio *wbio	= to_wbio(bio);
 	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
 	struct bch_fs *c		= wbio->c;
-	struct bch_dev *ca		= bch2_dev_bkey_exists(c, wbio->dev);
+	struct bch_dev *ca		= wbio->have_ioref
+		? bch2_dev_have_ref(c, wbio->dev)
+		: NULL;
 
 	if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
 				    op->pos.inode,
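With ca now NULL exactly when no io ref was taken, the endio path can key everything off wbio->have_ioref. A sketch of the release side this pairs with, assuming the ref is dropped under that flag and that bch2_dev_inum_io_err_on() tolerates a NULL ca (both assumptions about code outside this diff):

	if (wbio->have_ioref) {
		/* ... account write latency against ca ... */
		percpu_ref_put(&ca->io_ref);	/* pairs with the tryget at submit time */
	}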
@@ -1264,15 +1265,15 @@ static void bch2_nocow_write(struct bch_write_op *op)
 		/* Get iorefs before dropping btree locks: */
 		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 		bkey_for_each_ptr(ptrs, ptr) {
-			struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
+			struct bch_dev *ca = bch2_dev_get_ioref2(c, ptr->dev, WRITE);
+			if (unlikely(!ca))
+				goto err_get_ioref;
+
 			struct bpos b = PTR_BUCKET_POS(ca, ptr);
 			struct nocow_lock_bucket *l =
 				bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
 			prefetch(l);
 
-			if (unlikely(!bch2_dev_get_ioref(ca, WRITE)))
-				goto err_get_ioref;
-
 			/* XXX allocating memory with btree locks held - rare */
 			darray_push_gfp(&buckets, ((struct bucket_to_lock) {
 				.b = b, .gen = ptr->gen, .l = l,
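Note the reordering in this hunk: PTR_BUCKET_POS(ca, ptr) dereferences ca, so the NULL check from bch2_dev_get_ioref2() has to come first, whereas the old code computed the bucket position before it had tried the ref. The err_get_ioref label sits outside the hunk; a plausible unwind body, assuming each buckets entry corresponds to one ref already taken and that the device index can be recovered from the bucket position (hypothetical, not the patch's actual label body):

err_get_ioref:
	darray_for_each(buckets, i)
		/* i->b.inode is assumed to be the device index encoded in
		 * the bucket pos computed by PTR_BUCKET_POS() above */
		percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref);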