@@ -293,73 +293,78 @@ static void recover_list_clear(struct dlm_ls *ls)
 	spin_unlock_bh(&ls->ls_recover_list_lock);
 }
 
-static int recover_idr_empty(struct dlm_ls *ls)
+static int recover_xa_empty(struct dlm_ls *ls)
 {
 	int empty = 1;
 
-	spin_lock_bh(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_xa_lock);
 	if (ls->ls_recover_list_count)
 		empty = 0;
-	spin_unlock_bh(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_xa_lock);
 
 	return empty;
 }
 
-static int recover_idr_add(struct dlm_rsb *r)
+static int recover_xa_add(struct dlm_rsb *r)
 {
 	struct dlm_ls *ls = r->res_ls;
+	struct xa_limit limit = {
+		.min = 1,
+		.max = UINT_MAX,
+	};
+	uint32_t id;
 	int rv;
 
-	spin_lock_bh(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_xa_lock);
 	if (r->res_id) {
 		rv = -1;
 		goto out_unlock;
 	}
-	rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
+	rv = xa_alloc(&ls->ls_recover_xa, &id, r, limit, GFP_ATOMIC);
 	if (rv < 0)
 		goto out_unlock;
 
-	r->res_id = rv;
+	r->res_id = id;
 	ls->ls_recover_list_count++;
 	dlm_hold_rsb(r);
 	rv = 0;
 out_unlock:
-	spin_unlock_bh(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_xa_lock);
 	return rv;
 }
 
-static void recover_idr_del(struct dlm_rsb *r)
+static void recover_xa_del(struct dlm_rsb *r)
 {
 	struct dlm_ls *ls = r->res_ls;
 
-	spin_lock_bh(&ls->ls_recover_idr_lock);
-	idr_remove(&ls->ls_recover_idr, r->res_id);
+	spin_lock_bh(&ls->ls_recover_xa_lock);
+	xa_erase_bh(&ls->ls_recover_xa, r->res_id);
 	r->res_id = 0;
 	ls->ls_recover_list_count--;
-	spin_unlock_bh(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_xa_lock);
 
 	dlm_put_rsb(r);
 }
 
-static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
+static struct dlm_rsb *recover_xa_find(struct dlm_ls *ls, uint64_t id)
 {
 	struct dlm_rsb *r;
 
-	spin_lock_bh(&ls->ls_recover_idr_lock);
-	r = idr_find(&ls->ls_recover_idr, (int)id);
-	spin_unlock_bh(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_xa_lock);
+	r = xa_load(&ls->ls_recover_xa, (int)id);
+	spin_unlock_bh(&ls->ls_recover_xa_lock);
 	return r;
 }
 
-static void recover_idr_clear(struct dlm_ls *ls)
+static void recover_xa_clear(struct dlm_ls *ls)
 {
 	struct dlm_rsb *r;
-	int id;
+	unsigned long id;
 
-	spin_lock_bh(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_xa_lock);
 
-	idr_for_each_entry(&ls->ls_recover_idr, r, id) {
-		idr_remove(&ls->ls_recover_idr, id);
+	xa_for_each(&ls->ls_recover_xa, id, r) {
+		xa_erase_bh(&ls->ls_recover_xa, id);
 		r->res_id = 0;
 		r->res_recover_locks_count = 0;
 		ls->ls_recover_list_count--;
@@ -372,7 +377,7 @@ static void recover_idr_clear(struct dlm_ls *ls)
 			  ls->ls_recover_list_count);
 		ls->ls_recover_list_count = 0;
 	}
-	spin_unlock_bh(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_xa_lock);
 }
 
 
@@ -470,7 +475,7 @@ static int recover_master(struct dlm_rsb *r, unsigned int *count, uint64_t seq)
 		set_new_master(r);
 		error = 0;
 	} else {
-		recover_idr_add(r);
+		recover_xa_add(r);
 		error = dlm_send_rcom_lookup(r, dir_nodeid, seq);
 	}
 
@@ -551,10 +556,10 @@ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq,
 
 	log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);
 
-	error = dlm_wait_function(ls, &recover_idr_empty);
+	error = dlm_wait_function(ls, &recover_xa_empty);
 out:
 	if (error)
-		recover_idr_clear(ls);
+		recover_xa_clear(ls);
 	return error;
 }
 
@@ -563,7 +568,7 @@ int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc)
 	struct dlm_rsb *r;
 	int ret_nodeid, new_master;
 
-	r = recover_idr_find(ls, le64_to_cpu(rc->rc_id));
+	r = recover_xa_find(ls, le64_to_cpu(rc->rc_id));
 	if (!r) {
 		log_error(ls, "dlm_recover_master_reply no id %llx",
 			  (unsigned long long)le64_to_cpu(rc->rc_id));
@@ -582,9 +587,9 @@ int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc)
 	r->res_nodeid = new_master;
 	set_new_master(r);
 	unlock_rsb(r);
-	recover_idr_del(r);
+	recover_xa_del(r);
 
-	if (recover_idr_empty(ls))
+	if (recover_xa_empty(ls))
 		wake_up(&ls->ls_wait_general);
 out:
 	return 0;
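
For reference, here is a minimal, self-contained sketch of the allocating-XArray pattern the diff adopts, written outside the DLM code. The names (struct item, item_xa, item_track() and friends) are hypothetical and only illustrative; the real patch stores struct dlm_rsb pointers keyed by res_id and keeps its own ls_recover_xa_lock because ls_recover_list_count and res_id are updated under that lock, as before. The calling-convention change the diff has to absorb is that idr_alloc() returned the new ID, while xa_alloc() returns 0 or a negative errno and writes the chosen index through a u32 output parameter, hence the new local id and the r->res_id = id assignment.

/*
 * Hypothetical sketch of the xa_alloc()/xa_load()/xa_erase()/xa_for_each()
 * lifecycle used above; not part of the patch.
 */
#include <linux/xarray.h>

struct item {
	u32 id;		/* 0 means "not tracked", so allocation starts at 1 */
};

/* XA_FLAGS_ALLOC lets xa_alloc() pick a free index for us. */
static DEFINE_XARRAY_ALLOC(item_xa);

static int item_track(struct item *it)
{
	/* Same range as the patch: IDs in [1, UINT_MAX], 0 stays unused. */
	struct xa_limit limit = XA_LIMIT(1, UINT_MAX);
	u32 id;
	int rv;

	/*
	 * xa_alloc() stores the pointer, writes the chosen index to &id and
	 * returns 0 on success or a negative errno. GFP_ATOMIC mirrors the
	 * patch, which allocates under a bh-disabled spinlock; a sleepable
	 * caller could pass GFP_KERNEL instead.
	 */
	rv = xa_alloc(&item_xa, &id, it, limit, GFP_ATOMIC);
	if (rv < 0)
		return rv;

	it->id = id;
	return 0;
}

static struct item *item_lookup(u32 id)
{
	/* Same lookup primitive recover_xa_find() uses. */
	return xa_load(&item_xa, id);
}

static void item_untrack(struct item *it)
{
	xa_erase(&item_xa, it->id);
	it->id = 0;
}

static void item_clear_all(void)
{
	struct item *it;
	unsigned long id;	/* xa_for_each() iterates with an unsigned long index */

	/* Drop every tracked entry, like recover_xa_clear() does for rsbs. */
	xa_for_each(&item_xa, id, it) {
		xa_erase(&item_xa, id);
		it->id = 0;
	}
}

Since xa_alloc() and xa_erase() already serialize on the XArray's internal lock, the sketch needs no external spinlock; the patch keeps ls_recover_xa_lock because the list count and res_id bookkeeping must stay consistent with the lookup, exactly as in the IDR version.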