@@ -268,7 +268,6 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
 
 	xa_lock_irqsave(&ent->mkeys, flags);
 	push_to_reserved(ent, mr);
-	ent->total_mrs++;
 	/* If we are doing fill_to_high_water then keep going. */
 	queue_adjust_cache_locked(ent);
 	xa_unlock_irqrestore(&ent->mkeys, flags);
@@ -391,9 +390,6 @@ static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
 	init_waitqueue_head(&mr->mmkey.wait);
 	mr->mmkey.type = MLX5_MKEY_MR;
 	WRITE_ONCE(ent->dev->cache.last_add, jiffies);
-	xa_lock_irq(&ent->mkeys);
-	ent->total_mrs++;
-	xa_unlock_irq(&ent->mkeys);
 	kfree(in);
 	return mr;
 free_mr:
@@ -411,7 +407,6 @@ static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
 	if (!ent->stored)
 		return;
 	mr = pop_stored_mkey(ent);
-	ent->total_mrs--;
 	xa_unlock_irq(&ent->mkeys);
 	mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key);
 	kfree(mr);
@@ -467,11 +462,11 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
 	 * mkeys.
 	 */
 	xa_lock_irq(&ent->mkeys);
-	if (target < ent->total_mrs - ent->stored) {
+	if (target < ent->in_use) {
 		err = -EINVAL;
 		goto err_unlock;
 	}
-	target = target - (ent->total_mrs - ent->stored);
+	target = target - ent->in_use;
 	if (target < ent->limit || target > ent->limit * 2) {
 		err = -EINVAL;
 		goto err_unlock;
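
For reference, the size_write() check above now validates the requested total directly against ent->in_use: mkeys that are currently handed out cannot be shrunk away, so only the remainder (target - in_use) has to land in the [limit, 2 * limit] window. A standalone user-space sketch of that bounds check, using hypothetical toy_* names rather than the driver's types:

#include <errno.h>
#include <stdio.h>

struct toy_cache_ent {
	unsigned long stored;	/* mkeys sitting in the cache pool     */
	unsigned long in_use;	/* mkeys currently handed out to users */
	unsigned long limit;
};

/* Same validation as the patched size_write(): reject a target smaller
 * than what is already in use, then size only the stored pool.
 */
static int toy_resize(struct toy_cache_ent *ent, unsigned long target)
{
	if (target < ent->in_use)
		return -EINVAL;
	target -= ent->in_use;
	if (target < ent->limit || target > ent->limit * 2)
		return -EINVAL;
	ent->stored = target;	/* stand-in for the real resize path */
	return 0;
}

int main(void)
{
	struct toy_cache_ent ent = { .stored = 8, .in_use = 10, .limit = 16 };

	/* 26 total = 10 in use + a pool of 16, which fits the window */
	printf("resize to 26 -> %d, stored=%lu\n", toy_resize(&ent, 26), ent.stored);
	/* 5 is rejected outright: 10 mkeys are already out */
	printf("resize to 5  -> %d\n", toy_resize(&ent, 5));
	return 0;
}
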
@@ -495,7 +490,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
 	char lbuf[20];
 	int err;
 
-	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
+	err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use);
 	if (err < 0)
 		return err;
 
@@ -689,13 +684,19 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 		return ERR_PTR(-EOPNOTSUPP);
 
 	xa_lock_irq(&ent->mkeys);
+	ent->in_use++;
+
 	if (!ent->stored) {
 		queue_adjust_cache_locked(ent);
 		ent->miss++;
 		xa_unlock_irq(&ent->mkeys);
 		mr = create_cache_mr(ent);
-		if (IS_ERR(mr))
+		if (IS_ERR(mr)) {
+			xa_lock_irq(&ent->mkeys);
+			ent->in_use--;
+			xa_unlock_irq(&ent->mkeys);
 			return mr;
+		}
 	} else {
 		mr = pop_stored_mkey(ent);
 		queue_adjust_cache_locked(ent);
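
The allocation path now does the in_use bookkeeping itself: the counter is bumped under the xa_lock before the entry is consulted, and rolled back only if the miss path (create_cache_mr()) fails. A minimal single-threaded sketch of that pattern, with the locking elided and hypothetical toy_* names in place of the driver's types:

struct toy_cache_ent {
	unsigned long stored;	/* mkeys available in the pool */
	unsigned long in_use;	/* mkeys currently handed out  */
};

/* Stand-in for create_cache_mr(); imagine it can fail with -ENOMEM. */
static int toy_create_mkey(void)
{
	return 0;
}

/* Mirrors the accounting in mlx5_mr_cache_alloc(): count the mkey as
 * in use up front, undo the count only when the miss path fails.
 */
static int toy_cache_alloc(struct toy_cache_ent *ent)
{
	int err = 0;

	ent->in_use++;
	if (!ent->stored) {		/* cache miss: create a fresh mkey */
		err = toy_create_mkey();
		if (err)
			ent->in_use--;	/* roll back, nothing was handed out */
	} else {			/* cache hit: take one from the pool */
		ent->stored--;
	}
	return err;
}

int main(void)
{
	struct toy_cache_ent ent = { .stored = 1, .in_use = 0 };

	toy_cache_alloc(&ent);	/* hit:  stored 1 -> 0, in_use 0 -> 1 */
	toy_cache_alloc(&ent);	/* miss: in_use 1 -> 2 if creation succeeds */
	return ent.in_use == 2 ? 0 : 1;
}
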
@@ -716,7 +717,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 	xa_lock_irq(&ent->mkeys);
 	while (ent->stored) {
 		mr = pop_stored_mkey(ent);
-		ent->total_mrs--;
 		xa_unlock_irq(&ent->mkeys);
 		mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
 		kfree(mr);
@@ -1642,13 +1642,13 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 
 	/* Stop DMA */
 	if (mr->cache_ent) {
+		xa_lock_irq(&mr->cache_ent->mkeys);
+		mr->cache_ent->in_use--;
+		xa_unlock_irq(&mr->cache_ent->mkeys);
+
 		if (mlx5r_umr_revoke_mr(mr) ||
-		    push_mkey(mr->cache_ent, false, mr)) {
-			xa_lock_irq(&mr->cache_ent->mkeys);
-			mr->cache_ent->total_mrs--;
-			xa_unlock_irq(&mr->cache_ent->mkeys);
+		    push_mkey(mr->cache_ent, false, mr))
 			mr->cache_ent = NULL;
-		}
 	}
 	if (!mr->cache_ent) {
 		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
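
Deregistration is the mirror image: in_use is dropped before the driver tries to return the mkey to the cache, so the quantity the removed total_mrs counter used to track is always recoverable as stored + in_use, which is exactly what the reworked size_read() reports. A standalone continuation of the sketch above (the same hypothetical toy struct, repeated so this snippet compiles on its own):

struct toy_cache_ent {
	unsigned long stored;	/* mkeys available in the pool */
	unsigned long in_use;	/* mkeys currently handed out  */
};

/* Mirrors the reworked mlx5_ib_dereg_mr() accounting: the mkey stops
 * counting as in use no matter what; it only re-enters the pool when
 * returning it (the push_mkey() step) succeeds.
 */
static void toy_cache_dereg(struct toy_cache_ent *ent, int push_ok)
{
	ent->in_use--;
	if (push_ok)
		ent->stored++;
	/* on failure the mkey is destroyed and simply drops out of the totals */
}

/* What the debugfs size file now reports instead of total_mrs. */
static unsigned long toy_cache_total(const struct toy_cache_ent *ent)
{
	return ent->stored + ent->in_use;
}

int main(void)
{
	struct toy_cache_ent ent = { .stored = 3, .in_use = 2 };
	unsigned long before = toy_cache_total(&ent);	/* 5 */

	toy_cache_dereg(&ent, 1);	/* returned to the pool: total unchanged */
	toy_cache_dereg(&ent, 0);	/* destroyed: total shrinks by one       */
	return toy_cache_total(&ent) == before - 1 ? 0 : 1;
}
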