@@ -1138,7 +1138,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
 {
 	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);

-	if (!IS_ERR_OR_NULL(n)) {
+	if (!IS_ERR(n)) {
 		mutex_lock(&n->write_lock);
 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
 		bkey_copy_key(&n->key, &b->key);
@@ -1340,7 +1340,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 	memset(new_nodes, 0, sizeof(new_nodes));
 	closure_init_stack(&cl);

-	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
+	while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
 		keys += r[nodes++].keys;

 	blocks = btree_default_blocks(b->c) * 2 / 3;
@@ -1352,7 +1352,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,

 	for (i = 0; i < nodes; i++) {
 		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
-		if (IS_ERR_OR_NULL(new_nodes[i]))
+		if (IS_ERR(new_nodes[i]))
 			goto out_nocoalesce;
 	}
@@ -1487,7 +1487,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 	bch_keylist_free(&keylist);

 	for (i = 0; i < nodes; i++)
-		if (!IS_ERR_OR_NULL(new_nodes[i])) {
+		if (!IS_ERR(new_nodes[i])) {
 			btree_node_free(new_nodes[i]);
 			rw_unlock(true, new_nodes[i]);
 		}
@@ -1669,7 +1669,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
 	if (should_rewrite) {
 		n = btree_node_alloc_replacement(b, NULL);

-		if (!IS_ERR_OR_NULL(n)) {
+		if (!IS_ERR(n)) {
 			bch_btree_node_write_sync(n);

 			bch_btree_set_root(n);
0 commit comments