@@ -317,6 +317,12 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
 		: 0;
 	int ret;
 
+	b = bch2_btree_node_mem_alloc(trans, interior_node);
+	if (IS_ERR(b))
+		return b;
+
+	BUG_ON(b->ob.nr);
+
 	mutex_lock(&c->btree_reserve_cache_lock);
 	if (c->btree_reserve_cache_nr > nr_reserve) {
 		struct btree_alloc *a =
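Annotation: moving bch2_btree_node_mem_alloc() to the top of the function means every later failure path has a live struct btree to dispose of, and it lets the old BUG_ON(IS_ERR(b)) in the mem_alloc tail become an ordinary error return. The IS_ERR()/ERR_PTR() convention used here is the standard linux/err.h one; a minimal self-contained sketch of that convention (struct foo and foo_alloc() are illustrative names, not bcachefs code):

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };			/* illustrative type only */

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	return f;
}

static int foo_user(void)
{
	struct foo *f = foo_alloc();

	if (IS_ERR(f))			/* true only for ERR_PTR-encoded values */
		return PTR_ERR(f);	/* decode back to -ENOMEM etc. */
	kfree(f);
	return 0;
}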
@@ -325,10 +331,9 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
 		obs = a->ob;
 		bkey_copy(&tmp.k, &a->k);
 		mutex_unlock(&c->btree_reserve_cache_lock);
-		goto mem_alloc;
+		goto out;
 	}
 	mutex_unlock(&c->btree_reserve_cache_lock);
-
 retry:
 	ret = bch2_alloc_sectors_start_trans(trans,
 				      c->opts.metadata_target ?:
@@ -341,7 +346,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
 					  c->opts.metadata_replicas_required),
 				      watermark, 0, cl, &wp);
 	if (unlikely(ret))
-		return ERR_PTR(ret);
+		goto err;
 
 	if (wp->sectors_free < btree_sectors(c)) {
 		struct open_bucket *ob;
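Annotation: with the node preallocated, the reserve-cache hit no longer needs its own allocation step, so goto mem_alloc becomes goto out; likewise a sector-allocation failure can no longer simply return ERR_PTR(ret), since that would leak the already-allocated node, so it jumps to the new err label added in the next hunk.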
@@ -360,19 +365,16 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
 
 	bch2_open_bucket_get(c, wp, &obs);
 	bch2_alloc_sectors_done(c, wp);
-mem_alloc:
-	b = bch2_btree_node_mem_alloc(trans, interior_node);
-	six_unlock_write(&b->c.lock);
-	six_unlock_intent(&b->c.lock);
-
-	/* we hold cannibalize_lock: */
-	BUG_ON(IS_ERR(b));
-	BUG_ON(b->ob.nr);
-
+out:
 	bkey_copy(&b->key, &tmp.k);
 	b->ob = obs;
+	six_unlock_write(&b->c.lock);
+	six_unlock_intent(&b->c.lock);
 
 	return b;
+err:
+	bch2_btree_node_to_freelist(c, b);
+	return ERR_PTR(ret);
 }
 
 static struct btree *bch2_btree_node_alloc(struct btree_update *as,
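Annotation: the new err label relies on bch2_btree_node_to_freelist(), which this diff calls but never defines. Judging from the open-coded block it replaces in bch2_btree_node_update_key() below, a plausible sketch of the helper (inferred from the removed code, not copied from the real source):

/* Hypothetical sketch: body inferred from the open-coded sequence removed
 * from bch2_btree_node_update_key() in this same commit.
 */
void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b)
{
	mutex_lock(&c->btree_cache.lock);
	list_move(&b->list, &c->btree_cache.freeable);
	mutex_unlock(&c->btree_cache.lock);

	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);
}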
@@ -2439,21 +2441,19 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
 		}
 
 		new_hash = bch2_btree_node_mem_alloc(trans, false);
+		ret = PTR_ERR_OR_ZERO(new_hash);
+		if (ret)
+			goto err;
 	}
 
 	path->intent_ref++;
 	ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key,
 					   commit_flags, skip_triggers);
 	--path->intent_ref;
 
-	if (new_hash) {
-		mutex_lock(&c->btree_cache.lock);
-		list_move(&new_hash->list, &c->btree_cache.freeable);
-		mutex_unlock(&c->btree_cache.lock);
-
-		six_unlock_write(&new_hash->c.lock);
-		six_unlock_intent(&new_hash->c.lock);
-	}
+	if (new_hash)
+		bch2_btree_node_to_freelist(c, new_hash);
+err:
 	closure_sync(&cl);
 	bch2_btree_cache_cannibalize_unlock(trans);
 	return ret;
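Annotation: the update-key path now checks the allocation with PTR_ERR_OR_ZERO() and routes failure through err, so closure_sync() and bch2_btree_cache_cannibalize_unlock() still run on that path, and the hand-rolled move onto btree_cache.freeable collapses into the shared helper. PTR_ERR_OR_ZERO() is the stock linux/err.h accessor, roughly:

/* Rough shape of the linux/err.h helper (kernel annotations omitted): */
static inline int PTR_ERR_OR_ZERO(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}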
@@ -2522,6 +2522,10 @@ int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id
 	b = bch2_btree_node_mem_alloc(trans, false);
 	bch2_btree_cache_cannibalize_unlock(trans);
 
+	ret = PTR_ERR_OR_ZERO(b);
+	if (ret)
+		return ret;
+
 	set_btree_node_fake(b);
 	set_btree_node_need_rewrite(b);
 	b->c.level = level;
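Annotation: previously an allocation failure here went unchecked and set_btree_node_fake() would have operated on an ERR_PTR-encoded pointer; the new check sits after bch2_btree_cache_cannibalize_unlock() so the cannibalize lock is released on the error path as well.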
@@ -2553,7 +2557,7 @@ int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id
 
 void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level)
 {
-	bch2_trans_run(c, bch2_btree_root_alloc_fake_trans(trans, id, level));
+	bch2_trans_run(c, lockrestart_do(trans, bch2_btree_root_alloc_fake_trans(trans, id, level)));
 }
 
 static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as)
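Annotation: wrapping the body in lockrestart_do() makes the fake-root allocation retry on transaction restarts instead of surfacing them, which matters now that bch2_btree_node_mem_alloc() failures are propagated. A conceptual sketch of what the macro does, not the literal definition from fs/bcachefs/btree_iter.h:

/* Conceptual sketch of lockrestart_do(); the real macro differs in detail. */
#define lockrestart_do(_trans, _do)					\
({									\
	int _ret;							\
									\
	do {								\
		bch2_trans_begin(_trans);				\
		_ret = (_do);						\
	} while (bch2_err_matches(_ret, BCH_ERR_transaction_restart));	\
									\
	_ret;								\
})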