Skip to content

Commit e60e9ee

Browse files
dtatulea authored and mstsirkin committed
vdpa/mlx5: Allow creation of blank VQs
Based on the filled flag, create VQs that are filled or blank. Blank VQs
will be filled in later through VQ modify. Downstream patches will make
use of this to pre-create blank VQs at vdpa device creation.

Signed-off-by: Dragos Tatulea <[email protected]>
Reviewed-by: Cosmin Ratiu <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Acked-by: Eugenio Pérez <[email protected]>
Acked-by: Eugenio Pérez <[email protected]>
1 parent ebebaf4 commit e60e9ee

File tree

1 file changed

+55
-30
lines changed

1 file changed

+55
-30
lines changed

drivers/vdpa/mlx5/net/mlx5_vnet.c

Lines changed: 55 additions & 30 deletions
Original file line number | Diff line number | Diff line change
@@ -158,7 +158,7 @@ static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
158158

159159
static void free_fixed_resources(struct mlx5_vdpa_net *ndev);
160160
static void mvqs_set_defaults(struct mlx5_vdpa_net *ndev);
161-
static int setup_vq_resources(struct mlx5_vdpa_net *ndev);
161+
static int setup_vq_resources(struct mlx5_vdpa_net *ndev, bool filled);
162162
static void teardown_vq_resources(struct mlx5_vdpa_net *ndev);
163163

164164
static bool mlx5_vdpa_debug;
@@ -874,13 +874,16 @@ static bool msix_mode_supported(struct mlx5_vdpa_dev *mvdev)
874874
pci_msix_can_alloc_dyn(mvdev->mdev->pdev);
875875
}
876876

877-
static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
877+
static int create_virtqueue(struct mlx5_vdpa_net *ndev,
878+
struct mlx5_vdpa_virtqueue *mvq,
879+
bool filled)
878880
{
879881
int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
880882
u32 out[MLX5_ST_SZ_DW(create_virtio_net_q_out)] = {};
881883
struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
882884
struct mlx5_vdpa_mr *vq_mr;
883885
struct mlx5_vdpa_mr *vq_desc_mr;
886+
u64 features = filled ? mvdev->actual_features : mvdev->mlx_features;
884887
void *obj_context;
885888
u16 mlx_features;
886889
void *cmd_hdr;
@@ -898,16 +901,14 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
898901
goto err_alloc;
899902
}
900903

901-
mlx_features = get_features(ndev->mvdev.actual_features);
904+
mlx_features = get_features(features);
902905
cmd_hdr = MLX5_ADDR_OF(create_virtio_net_q_in, in, general_obj_in_cmd_hdr);
903906

904907
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
905908
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
906909
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
907910

908911
obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
909-
MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
910-
MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
911912
MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
912913
mlx_features >> 3);
913914
MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_2_0,
@@ -929,17 +930,36 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
929930
MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index);
930931
MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
931932
MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
932-
!!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
933-
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
934-
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
935-
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
936-
vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
937-
if (vq_mr)
938-
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
939-
940-
vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
941-
if (vq_desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
942-
MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey);
933+
!!(features & BIT_ULL(VIRTIO_F_VERSION_1)));
934+
935+
if (filled) {
936+
MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
937+
MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
938+
939+
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
940+
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
941+
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
942+
943+
vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
944+
if (vq_mr)
945+
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
946+
947+
vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
948+
if (vq_desc_mr &&
949+
MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
950+
MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey);
951+
} else {
952+
/* If there is no mr update, make sure that the existing ones are set
953+
* modify to ready.
954+
*/
955+
vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
956+
if (vq_mr)
957+
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;
958+
959+
vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
960+
if (vq_desc_mr)
961+
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
962+
}
943963

944964
MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
945965
MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
@@ -959,12 +979,15 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
959979
kfree(in);
960980
mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
961981

962-
mlx5_vdpa_get_mr(mvdev, vq_mr);
963-
mvq->vq_mr = vq_mr;
982+
if (filled) {
983+
mlx5_vdpa_get_mr(mvdev, vq_mr);
984+
mvq->vq_mr = vq_mr;
964985

965-
if (vq_desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) {
966-
mlx5_vdpa_get_mr(mvdev, vq_desc_mr);
967-
mvq->desc_mr = vq_desc_mr;
986+
if (vq_desc_mr &&
987+
MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) {
988+
mlx5_vdpa_get_mr(mvdev, vq_desc_mr);
989+
mvq->desc_mr = vq_desc_mr;
990+
}
968991
}
969992

970993
return 0;
@@ -1442,7 +1465,9 @@ static void dealloc_vector(struct mlx5_vdpa_net *ndev,
14421465
}
14431466
}
14441467

1445-
static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
1468+
static int setup_vq(struct mlx5_vdpa_net *ndev,
1469+
struct mlx5_vdpa_virtqueue *mvq,
1470+
bool filled)
14461471
{
14471472
u16 idx = mvq->index;
14481473
int err;
@@ -1471,7 +1496,7 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
14711496
goto err_connect;
14721497

14731498
alloc_vector(ndev, mvq);
1474-
err = create_virtqueue(ndev, mvq);
1499+
err = create_virtqueue(ndev, mvq, filled);
14751500
if (err)
14761501
goto err_vq;
14771502

@@ -2062,7 +2087,7 @@ static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
20622087
} else {
20632088
ndev->cur_num_vqs = 2 * newqps;
20642089
for (i = cur_qps * 2; i < 2 * newqps; i++) {
2065-
err = setup_vq(ndev, &ndev->vqs[i]);
2090+
err = setup_vq(ndev, &ndev->vqs[i], true);
20662091
if (err)
20672092
goto clean_added;
20682093
}
@@ -2558,14 +2583,14 @@ static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features)
25582583
return 0;
25592584
}
25602585

2561-
static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
2586+
static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev, bool filled)
25622587
{
25632588
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
25642589
int err;
25652590
int i;
25662591

25672592
for (i = 0; i < mvdev->max_vqs; i++) {
2568-
err = setup_vq(ndev, &ndev->vqs[i]);
2593+
err = setup_vq(ndev, &ndev->vqs[i], filled);
25692594
if (err)
25702595
goto err_vq;
25712596
}
@@ -2877,7 +2902,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
28772902

28782903
if (teardown) {
28792904
restore_channels_info(ndev);
2880-
err = setup_vq_resources(ndev);
2905+
err = setup_vq_resources(ndev, true);
28812906
if (err)
28822907
return err;
28832908
}
@@ -2888,7 +2913,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
28882913
}
28892914

28902915
/* reslock must be held for this function */
2891-
static int setup_vq_resources(struct mlx5_vdpa_net *ndev)
2916+
static int setup_vq_resources(struct mlx5_vdpa_net *ndev, bool filled)
28922917
{
28932918
struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
28942919
int err;
@@ -2906,7 +2931,7 @@ static int setup_vq_resources(struct mlx5_vdpa_net *ndev)
29062931
if (err)
29072932
goto err_setup;
29082933

2909-
err = setup_virtqueues(mvdev);
2934+
err = setup_virtqueues(mvdev, filled);
29102935
if (err) {
29112936
mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
29122937
goto err_setup;
@@ -3000,7 +3025,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
30003025
goto err_setup;
30013026
}
30023027
register_link_notifier(ndev);
3003-
err = setup_vq_resources(ndev);
3028+
err = setup_vq_resources(ndev, true);
30043029
if (err) {
30053030
mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
30063031
goto err_driver;

0 commit comments

Comments
 (0)