@@ -800,7 +800,6 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 	struct bch_fs *c = trans->c;
 	struct extent_ptr_decoded pick;
 	struct bch_read_bio *rbio = NULL;
-	struct bch_dev *ca = NULL;
 	struct promote_op *promote = NULL;
 	bool bounce = false, read_full = false, narrow_crcs = false;
 	struct bpos data_pos = bkey_start_pos(k.k);
@@ -831,7 +830,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 		goto err;
 	}
 
-	ca = bch2_dev_bkey_exists(c, pick.ptr.dev);
+	struct bch_dev *ca = bch2_dev_get_ioref2(c, pick.ptr.dev, READ);
 
 	/*
 	 * Stale dirty pointers are treated as IO errors, but @failed isn't
@@ -841,9 +840,11 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 	 */
 	if ((flags & BCH_READ_IN_RETRY) &&
 	    !pick.ptr.cached &&
+	    ca &&
 	    unlikely(dev_ptr_stale(ca, &pick.ptr))) {
 		read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
 		bch2_mark_io_failure(failed, &pick);
+		percpu_ref_put(&ca->io_ref);
 		goto retry_pick;
 	}
 
@@ -858,8 +859,11 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 		 * can happen if we retry, and the extent we were going to read
 		 * has been merged in the meantime:
 		 */
-		if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
+		if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS) {
+			if (ca)
+				percpu_ref_put(&ca->io_ref);
 			goto hole;
+		}
 
 		iter.bi_size	= pick.crc.compressed_size << 9;
 		goto get_bio;
@@ -964,7 +968,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 	rbio->bvec_iter		= iter;
 	rbio->offset_into_extent = offset_into_extent;
 	rbio->flags		= flags;
-	rbio->have_ioref	= pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
+	rbio->have_ioref	= ca != NULL;
 	rbio->narrow_crcs	= narrow_crcs;
 	rbio->hole		= 0;
 	rbio->retry		= 0;
@@ -994,7 +998,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 	 * If it's being moved internally, we don't want to flag it as a cache
 	 * hit:
 	 */
-	if (pick.ptr.cached && !(flags & BCH_READ_NODECODE))
+	if (ca && pick.ptr.cached && !(flags & BCH_READ_NODECODE))
 		bch2_bucket_io_time_reset(trans, pick.ptr.dev,
 			PTR_BUCKET_NR(ca, &pick.ptr), READ);
 
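A note on the contract this diff introduces: bch2_dev_get_ioref2() returns the device with its io_ref percpu reference already held, or NULL if the device is unavailable, so every early exit between picking the pointer and submitting the bio must drop the ref, and rbio->have_ioref collapses to a NULL check. Below is a minimal kernel-style sketch of that acquire/early-exit pattern; my_dev, my_dev_get_ioref(), and my_read() are assumed illustrative names, not the actual bcachefs helpers.

#include <linux/errno.h>
#include <linux/percpu-refcount.h>

struct my_dev {
	struct percpu_ref io_ref;	/* held across each in-flight read */
};

/*
 * Hypothetical helper mirroring the contract of bch2_dev_get_ioref2():
 * return the device with io_ref held, or NULL if it can't be used.
 * The caller owns one percpu_ref_put() per successful call.
 */
static struct my_dev *my_dev_get_ioref(struct my_dev *ca)
{
	if (ca && percpu_ref_tryget(&ca->io_ref))
		return ca;
	return NULL;
}

static int my_read(struct my_dev *ca, bool bail_early)
{
	ca = my_dev_get_ioref(ca);

	if (bail_early) {
		/*
		 * Every early exit drops the ref, as the retry_pick and
		 * hole paths do in the diff above.
		 */
		if (ca)
			percpu_ref_put(&ca->io_ref);
		return -EAGAIN;
	}

	/* ... submit the bio; the completion path puts io_ref ... */
	return 0;
}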