@@ -619,6 +619,7 @@ struct mlx5_ib_mkey {
619
619
unsigned int ndescs ;
620
620
struct wait_queue_head wait ;
621
621
refcount_t usecount ;
622
+ struct mlx5_cache_ent * cache_ent ;
622
623
};
623
624
624
625
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
@@ -641,18 +642,9 @@ struct mlx5_ib_mr {
641
642
struct ib_mr ibmr ;
642
643
struct mlx5_ib_mkey mmkey ;
643
644
644
- /* User MR data */
645
- struct mlx5_cache_ent * cache_ent ;
646
- /* Everything after cache_ent is zero'd when MR allocated */
647
645
struct ib_umem * umem ;
648
646
649
647
union {
650
- /* Used only while the MR is in the cache */
651
- struct {
652
- u32 out [MLX5_ST_SZ_DW (create_mkey_out )];
653
- struct mlx5_async_work cb_work ;
654
- };
655
-
656
648
/* Used only by kernel MRs (umem == NULL) */
657
649
struct {
658
650
void * descs ;
@@ -692,12 +684,6 @@ struct mlx5_ib_mr {
692
684
};
693
685
};
694
686
695
- /* Zero the fields in the mr that are variant depending on usage */
696
- static inline void mlx5_clear_mr (struct mlx5_ib_mr * mr )
697
- {
698
- memset_after (mr , 0 , cache_ent );
699
- }
700
-
701
687
static inline bool is_odp_mr (struct mlx5_ib_mr * mr )
702
688
{
703
689
return IS_ENABLED (CONFIG_INFINIBAND_ON_DEMAND_PAGING ) && mr -> umem &&
@@ -768,6 +754,16 @@ struct mlx5_cache_ent {
768
754
struct delayed_work dwork ;
769
755
};
770
756
757
+ /*
+  * Per-request context for creating an mkey asynchronously on behalf of
+  * the MR cache.  The CREATE_MKEY command input and output buffers share
+  * storage (a request is no longer needed once the response arrives);
+  * 'in' is sized in bytes and 'out' in dwords per the MLX5_ST_SZ_* macros.
+  */
+ struct mlx5r_async_create_mkey {
758
+ union {
759
+ u32 in [MLX5_ST_SZ_BYTES (create_mkey_in )];
760
+ u32 out [MLX5_ST_SZ_DW (create_mkey_out )];
761
+ };
762
+ /* async command completion context (mlx5_cmd_exec_cb machinery) */
+ struct mlx5_async_work cb_work ;
763
+ /* NOTE(review): presumably the cache bucket the new mkey is destined
+  * for — confirm against the mr cache callback code */
+ struct mlx5_cache_ent * ent ;
764
+ /* resulting mkey index — assumed filled from the command response;
+  * TODO confirm in the completion handler */
+ u32 mkey ;
765
+ };
766
+
771
767
struct mlx5_mr_cache {
772
768
struct workqueue_struct * wq ;
773
769
struct mlx5_cache_ent ent [MAX_MR_CACHE_ENTRIES ];
0 commit comments