@@ -941,23 +941,23 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
 		MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
 		MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
 
-		vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+		vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
 		if (vq_mr)
 			MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
 
-		vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+		vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
 		if (vq_desc_mr &&
 		    MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
 			MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey);
 	} else {
 		/* If there is no mr update, make sure that the existing ones are set
 		 * modify to ready.
 		 */
-		vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+		vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
 		if (vq_mr)
 			mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;
 
-		vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+		vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
 		if (vq_desc_mr)
 			mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
 	}
@@ -1354,7 +1354,7 @@ static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
 	}
 
 	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
-		vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+		vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
 
 		if (vq_mr)
 			MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
@@ -1363,7 +1363,7 @@ static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
 	}
 
 	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
-		desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+		desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
 
 		if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
 			MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey);
@@ -1381,17 +1381,17 @@ static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev,
 	struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
 
 	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
-		unsigned int asid = mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP];
-		struct mlx5_vdpa_mr *vq_mr = mvdev->mr[asid];
+		unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP];
+		struct mlx5_vdpa_mr *vq_mr = mvdev->mres.mr[asid];
 
 		mlx5_vdpa_put_mr(mvdev, mvq->vq_mr);
 		mlx5_vdpa_get_mr(mvdev, vq_mr);
 		mvq->vq_mr = vq_mr;
 	}
 
 	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
-		unsigned int asid = mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP];
-		struct mlx5_vdpa_mr *desc_mr = mvdev->mr[asid];
+		unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP];
+		struct mlx5_vdpa_mr *desc_mr = mvdev->mres.mr[asid];
 
 		mlx5_vdpa_put_mr(mvdev, mvq->desc_mr);
 		mlx5_vdpa_get_mr(mvdev, desc_mr);
@@ -3235,7 +3235,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
 
 	/* default mapping all groups are mapped to asid 0 */
 	for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
-		mvdev->group2asid[i] = 0;
+		mvdev->mres.group2asid[i] = 0;
 }
 
 static bool needs_vqs_reset(const struct mlx5_vdpa_dev *mvdev)
@@ -3353,7 +3353,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
 		new_mr = NULL;
 	}
 
-	if (!mvdev->mr[asid]) {
+	if (!mvdev->mres.mr[asid]) {
 		mlx5_vdpa_update_mr(mvdev, new_mr, asid);
 	} else {
 		err = mlx5_vdpa_change_map(mvdev, new_mr, asid);
@@ -3637,12 +3637,12 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
 	if (group >= MLX5_VDPA_NUMVQ_GROUPS)
 		return -EINVAL;
 
-	mvdev->group2asid[group] = asid;
+	mvdev->mres.group2asid[group] = asid;
 
-	mutex_lock(&mvdev->mr_mtx);
-	if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mr[asid])
-		err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mr[asid]->iotlb, asid);
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
+	if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid])
+		err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 
 	return err;
 }
@@ -3962,7 +3962,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 	if (err)
 		goto err_mpfs;
 
-	INIT_LIST_HEAD(&mvdev->mr_list_head);
+	INIT_LIST_HEAD(&mvdev->mres.mr_list_head);
 
 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
 		err = mlx5_vdpa_create_dma_mr(mvdev);
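
Note: these hunks only touch the call sites; the definition of the new mres member of struct mlx5_vdpa_dev lives in a header outside this diff. A minimal sketch of what the container presumably looks like, inferred from the four fields accessed above (the array bound MLX5_VDPA_NUM_AS is an assumption, not shown in this diff):

struct mlx5_vdpa_mr_resources {
	struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];        /* one MR slot per address space (ASID) */
	unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];  /* maps each VQ group to its ASID */
	struct list_head mr_list_head;                    /* list of registered MRs */
	struct mutex mr_mtx;                              /* serializes MR updates, as taken in mlx5_set_group_asid() */
};

Grouping the MR array, the group-to-ASID table, the MR list, and their lock in one struct keeps related state together: every lookup reads as mvdev->mres.mr[mvdev->mres.group2asid[group]], and the scope of mr_mtx is visible at a glance.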