Commit 0113780

aharonl-nvidia authored and jgunthorpe committed
RDMA/mlx5: Rename the mkey cache variables and functions
After replacing the MR cache with an Mkey cache, rename the variables and
functions to fit the new meaning.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Aharon Landau <[email protected]>
Signed-off-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent 6b75338 commit 0113780
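
For quick reference, the public entry points renamed by this commit are summarized in the sketch below; the authoritative declarations are in the drivers/infiniband/hw/mlx5/mlx5_ib.h hunk of the diff.

	int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);              /* was mlx5_mr_cache_init() */
	int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);           /* was mlx5_mr_cache_cleanup() */
	void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent); /* was mlx5_odp_init_mr_cache_entry() */

The struct mlx5_mr_cache type and the MR_CACHE_LAST_STD_ENTRY / MAX_MR_CACHE_ENTRIES enum values are renamed to struct mlx5_mkey_cache, MKEY_CACHE_LAST_STD_ENTRY and MAX_MKEY_CACHE_ENTRIES in the same way, as the diff shows.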

File tree

5 files changed: +40 −40 lines

drivers/infiniband/hw/mlx5/main.c
Lines changed: 2 additions & 2 deletions

@@ -4002,7 +4002,7 @@ static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
 {
 	int err;
 
-	err = mlx5_mr_cache_cleanup(dev);
+	err = mlx5_mkey_cache_cleanup(dev);
 	if (err)
 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
 
@@ -4022,7 +4022,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 	if (ret)
 		return ret;
 
-	ret = mlx5_mr_cache_init(dev);
+	ret = mlx5_mkey_cache_init(dev);
 	if (ret) {
 		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
 		mlx5r_umr_resource_cleanup(dev);

drivers/infiniband/hw/mlx5/mlx5_ib.h
Lines changed: 7 additions & 7 deletions

@@ -764,9 +764,9 @@ struct mlx5r_async_create_mkey {
 	u32 mkey;
 };
 
-struct mlx5_mr_cache {
+struct mlx5_mkey_cache {
 	struct workqueue_struct	*wq;
-	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
+	struct mlx5_cache_ent	ent[MAX_MKEY_CACHE_ENTRIES];
 	struct dentry		*root;
 	unsigned long		last_add;
 };
@@ -1065,7 +1065,7 @@ struct mlx5_ib_dev {
 	struct mlx5_ib_resources	devr;
 
 	atomic_t			mkey_var;
-	struct mlx5_mr_cache		cache;
+	struct mlx5_mkey_cache		cache;
 	struct timer_list		delay_timer;
 	/* Prevents soft lock on massive reg MRs */
 	struct mutex			slow_path_mutex;
@@ -1310,8 +1310,8 @@ void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
 			  u64 access_flags);
 void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
 int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
 
 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 				       struct mlx5_cache_ent *ent,
@@ -1339,7 +1339,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
 int __init mlx5_ib_odp_init(void);
 void mlx5_ib_odp_cleanup(void);
-void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent);
 void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
 			   struct mlx5_ib_mr *mr, int flags);
 
@@ -1358,7 +1358,7 @@ static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
 static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
 static inline int mlx5_ib_odp_init(void) { return 0; }
 static inline void mlx5_ib_odp_cleanup(void) {}
-static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
+static inline void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) {}
 static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
 					 struct mlx5_ib_mr *mr, int flags) {}
 

drivers/infiniband/hw/mlx5/mr.c
Lines changed: 27 additions & 27 deletions

@@ -119,7 +119,7 @@ static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
 				&async_create->cb_work);
 }
 
-static int mr_cache_max_order(struct mlx5_ib_dev *dev);
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -515,11 +515,11 @@ static const struct file_operations limit_fops = {
 	.read	= limit_read,
 };
 
-static bool someone_adding(struct mlx5_mr_cache *cache)
+static bool someone_adding(struct mlx5_mkey_cache *cache)
 {
 	unsigned int i;
 
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		struct mlx5_cache_ent *ent = &cache->ent[i];
 		bool ret;
 
@@ -569,7 +569,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
 static void __cache_work_func(struct mlx5_cache_ent *ent)
 {
 	struct mlx5_ib_dev *dev = ent->dev;
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	int err;
 
 	xa_lock_irq(&ent->mkeys);
@@ -681,7 +681,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	u32 mkey;
 
@@ -696,7 +696,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 	xa_unlock_irq(&ent->mkeys);
 }
 
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
 {
 	if (!mlx5_debugfs_root || dev->is_rep)
 		return;
@@ -705,9 +705,9 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
 	dev->cache.root = NULL;
 }
 
-static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
 	struct dentry *dir;
 	int i;
@@ -717,7 +717,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 
 	cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev));
 
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		ent = &cache->ent[i];
 		sprintf(ent->name, "%d", ent->order);
 		dir = debugfs_create_dir(ent->name, cache->root);
@@ -735,9 +735,9 @@ static void delay_time_func(struct timer_list *t)
 	WRITE_ONCE(dev->fill_delay, 0);
 }
 
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
 	int i;
 
@@ -750,7 +750,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 
 	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
 	timer_setup(&dev->delay_timer, delay_time_func, 0);
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		ent = &cache->ent[i];
 		xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
 		ent->order = i + 2;
@@ -759,12 +759,12 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
 
-		if (i > MR_CACHE_LAST_STD_ENTRY) {
-			mlx5_odp_init_mr_cache_entry(ent);
+		if (i > MKEY_CACHE_LAST_STD_ENTRY) {
+			mlx5_odp_init_mkey_cache_entry(ent);
 			continue;
 		}
 
-		if (ent->order > mr_cache_max_order(dev))
+		if (ent->order > mkey_cache_max_order(dev))
 			continue;
 
 		ent->page = PAGE_SHIFT;
@@ -781,19 +781,19 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		xa_unlock_irq(&ent->mkeys);
 	}
 
-	mlx5_mr_cache_debugfs_init(dev);
+	mlx5_mkey_cache_debugfs_init(dev);
 
 	return 0;
 }
 
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 {
 	unsigned int i;
 
 	if (!dev->cache.wq)
 		return 0;
 
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		struct mlx5_cache_ent *ent = &dev->cache.ent[i];
 
 		xa_lock_irq(&ent->mkeys);
@@ -802,10 +802,10 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 		cancel_delayed_work_sync(&ent->dwork);
 	}
 
-	mlx5_mr_cache_debugfs_cleanup(dev);
+	mlx5_mkey_cache_debugfs_cleanup(dev);
 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
 
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++)
 		clean_keys(dev, i);
 
 	destroy_workqueue(dev->cache.wq);
@@ -872,22 +872,22 @@ static int get_octo_len(u64 addr, u64 len, int page_shift)
 	return (npages + 1) / 2;
 }
 
-static int mr_cache_max_order(struct mlx5_ib_dev *dev)
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
 {
 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
-		return MR_CACHE_LAST_STD_ENTRY + 2;
+		return MKEY_CACHE_LAST_STD_ENTRY + 2;
 	return MLX5_MAX_UMR_SHIFT;
 }
 
-static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
-						      unsigned int order)
+static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev,
+							unsigned int order)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 
 	if (order < cache->ent[0].order)
 		return &cache->ent[0];
 	order = order - cache->ent[0].order;
-	if (order > MR_CACHE_LAST_STD_ENTRY)
+	if (order > MKEY_CACHE_LAST_STD_ENTRY)
 		return NULL;
 	return &cache->ent[order];
 }
@@ -930,7 +930,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 					     0, iova);
 	if (WARN_ON(!page_size))
 		return ERR_PTR(-EINVAL);
-	ent = mr_cache_ent_from_order(
+	ent = mkey_cache_ent_from_order(
 		dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
 	/*
 	 * Matches access in alloc_cache_mr(). If the MR can't come from the

drivers/infiniband/hw/mlx5/odp.c
Lines changed: 1 addition & 1 deletion

@@ -1588,7 +1588,7 @@ mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 	return err;
 }
 
-void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
 {
 	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
 		return;

include/linux/mlx5/driver.h
Lines changed: 3 additions & 3 deletions

@@ -728,10 +728,10 @@ enum {
 };
 
 enum {
-	MR_CACHE_LAST_STD_ENTRY = 20,
+	MKEY_CACHE_LAST_STD_ENTRY = 20,
 	MLX5_IMR_MTT_CACHE_ENTRY,
 	MLX5_IMR_KSM_CACHE_ENTRY,
-	MAX_MR_CACHE_ENTRIES
+	MAX_MKEY_CACHE_ENTRIES
 };
 
 struct mlx5_profile {
@@ -740,7 +740,7 @@ struct mlx5_profile {
 	struct {
 		int size;
 		int limit;
-	} mr_cache[MAX_MR_CACHE_ENTRIES];
+	} mr_cache[MAX_MKEY_CACHE_ENTRIES];
 };
 
 struct mlx5_hca_cap {
