@@ -119,7 +119,7 @@ static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
 				&async_create->cb_work);
 }
 
-static int mr_cache_max_order(struct mlx5_ib_dev *dev);
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -515,11 +515,11 @@ static const struct file_operations limit_fops = {
 	.read	= limit_read,
 };
 
-static bool someone_adding(struct mlx5_mr_cache *cache)
+static bool someone_adding(struct mlx5_mkey_cache *cache)
 {
 	unsigned int i;
 
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		struct mlx5_cache_ent *ent = &cache->ent[i];
 		bool ret;
 
@@ -569,7 +569,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
 static void __cache_work_func(struct mlx5_cache_ent *ent)
 {
 	struct mlx5_ib_dev *dev = ent->dev;
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	int err;
 
 	xa_lock_irq(&ent->mkeys);
@@ -681,7 +681,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	u32 mkey;
 
@@ -696,7 +696,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 	xa_unlock_irq(&ent->mkeys);
 }
 
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
 {
 	if (!mlx5_debugfs_root || dev->is_rep)
 		return;
@@ -705,9 +705,9 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
 	dev->cache.root = NULL;
 }
 
-static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
 	struct dentry *dir;
 	int i;
@@ -717,7 +717,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 
 	cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev));
 
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		ent = &cache->ent[i];
 		sprintf(ent->name, "%d", ent->order);
 		dir = debugfs_create_dir(ent->name, cache->root);
@@ -735,9 +735,9 @@ static void delay_time_func(struct timer_list *t)
 	WRITE_ONCE(dev->fill_delay, 0);
 }
 
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
 	int i;
 
@@ -750,7 +750,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 
 	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
 	timer_setup(&dev->delay_timer, delay_time_func, 0);
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		ent = &cache->ent[i];
 		xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
 		ent->order = i + 2;
@@ -759,12 +759,12 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
 
-		if (i > MR_CACHE_LAST_STD_ENTRY) {
-			mlx5_odp_init_mr_cache_entry(ent);
+		if (i > MKEY_CACHE_LAST_STD_ENTRY) {
+			mlx5_odp_init_mkey_cache_entry(ent);
 			continue;
 		}
 
-		if (ent->order > mr_cache_max_order(dev))
+		if (ent->order > mkey_cache_max_order(dev))
 			continue;
 
 		ent->page = PAGE_SHIFT;
@@ -781,19 +781,19 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		xa_unlock_irq(&ent->mkeys);
 	}
 
-	mlx5_mr_cache_debugfs_init(dev);
+	mlx5_mkey_cache_debugfs_init(dev);
 
 	return 0;
 }
 
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 {
 	unsigned int i;
 
 	if (!dev->cache.wq)
 		return 0;
 
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		struct mlx5_cache_ent *ent = &dev->cache.ent[i];
 
 		xa_lock_irq(&ent->mkeys);
@@ -802,10 +802,10 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 		cancel_delayed_work_sync(&ent->dwork);
 	}
 
-	mlx5_mr_cache_debugfs_cleanup(dev);
+	mlx5_mkey_cache_debugfs_cleanup(dev);
 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
 
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++)
 		clean_keys(dev, i);
 
 	destroy_workqueue(dev->cache.wq);
@@ -872,22 +872,22 @@ static int get_octo_len(u64 addr, u64 len, int page_shift)
 	return (npages + 1) / 2;
 }
 
-static int mr_cache_max_order(struct mlx5_ib_dev *dev)
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
 {
 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
-		return MR_CACHE_LAST_STD_ENTRY + 2;
+		return MKEY_CACHE_LAST_STD_ENTRY + 2;
 	return MLX5_MAX_UMR_SHIFT;
 }
 
-static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
-						       unsigned int order)
+static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev,
+							 unsigned int order)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 
 	if (order < cache->ent[0].order)
 		return &cache->ent[0];
 	order = order - cache->ent[0].order;
-	if (order > MR_CACHE_LAST_STD_ENTRY)
+	if (order > MKEY_CACHE_LAST_STD_ENTRY)
 		return NULL;
 	return &cache->ent[order];
 }
@@ -930,7 +930,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 						     0, iova);
 	if (WARN_ON(!page_size))
 		return ERR_PTR(-EINVAL);
-	ent = mr_cache_ent_from_order(
+	ent = mkey_cache_ent_from_order(
 		dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
 	/*
 	 * Matches access in alloc_cache_mr(). If the MR can't come from the
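For context on what the renamed helper does, below is a minimal user-space sketch of the order-to-bucket lookup performed by `mkey_cache_ent_from_order()` in the hunk above. The simplified structs, the `NUM_ENTRIES` / `MKEY_CACHE_LAST_STD_ENTRY` values, the `ent_from_order()` wrapper, and the demo in `main()` are illustrative assumptions, not the kernel definitions:

```c
#include <stdio.h>

/* Illustrative stand-ins for the kernel structures; sizes are assumptions. */
#define MKEY_CACHE_LAST_STD_ENTRY 19		/* hypothetical index of the last standard bucket */
#define NUM_ENTRIES (MKEY_CACHE_LAST_STD_ENTRY + 1)

struct cache_ent {
	unsigned int order;	/* log2 of the number of DMA blocks this bucket serves */
};

struct mkey_cache {
	struct cache_ent ent[NUM_ENTRIES];
};

/*
 * Same shape as mkey_cache_ent_from_order() in the diff: round small requests
 * up to ent[0], index by the offset from ent[0].order, and return NULL when
 * the request exceeds the last standard entry (the caller then allocates an
 * mkey outside the cache).
 */
static struct cache_ent *ent_from_order(struct mkey_cache *cache, unsigned int order)
{
	if (order < cache->ent[0].order)
		return &cache->ent[0];
	order = order - cache->ent[0].order;
	if (order > MKEY_CACHE_LAST_STD_ENTRY)
		return NULL;
	return &cache->ent[order];
}

int main(void)
{
	struct mkey_cache cache;
	unsigned int i;

	/* ent[i].order = i + 2 mirrors the init loop in mlx5_mkey_cache_init(). */
	for (i = 0; i < NUM_ENTRIES; i++)
		cache.ent[i].order = i + 2;

	printf("order 1  -> bucket of order %u\n", ent_from_order(&cache, 1)->order);
	printf("order 10 -> bucket of order %u\n", ent_from_order(&cache, 10)->order);
	printf("order 40 -> %s\n", ent_from_order(&cache, 40) ? "cached" : "outside the cache");
	return 0;
}
```

This is only meant to make the renamed helper's behaviour easy to trace; the real entry count and the ODP special-casing live in the mlx5 driver headers.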