Skip to content

Commit 1fcdf43

Browse files
dtatulea authored and mstsirkin committed
vdpa/mlx5: Use async API for vq query command
vdpa/mlx5: Use async API for vq query command

Switch the firmware vq query command to be issued via the async API to
allow future parallelization. For now the command is still serial, but
the infrastructure is there to issue commands in parallel, including
rate-limiting the number of issued async commands to firmware. A later
patch will switch to issuing more commands at a time.

Signed-off-by: Dragos Tatulea <[email protected]>
Reviewed-by: Tariq Toukan <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Tested-by: Lei Yang <[email protected]>
1 parent d89d58f commit 1fcdf43

File tree

2 files changed

+78
-25
lines changed

2 files changed

+78
-25
lines changed

drivers/vdpa/mlx5/core/mlx5_vdpa.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,8 @@ struct mlx5_vdpa_dev {
103103
struct workqueue_struct *wq;
104104
unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
105105
bool suspended;
106+
107+
struct mlx5_async_ctx async_ctx;
106108
};
107109

108110
struct mlx5_vdpa_async_cmd {

drivers/vdpa/mlx5/net/mlx5_vnet.c

Lines changed: 76 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -1184,40 +1184,87 @@ struct mlx5_virtq_attr {
11841184
u16 used_index;
11851185
};
11861186

1187-
static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
1188-
struct mlx5_virtq_attr *attr)
1189-
{
1190-
int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out);
1191-
u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {};
1192-
void *out;
1193-
void *obj_context;
1194-
void *cmd_hdr;
1195-
int err;
1196-
1197-
out = kzalloc(outlen, GFP_KERNEL);
1198-
if (!out)
1199-
return -ENOMEM;
1187+
/* Input/output mailbox pair for one firmware virtqueue query command.
 * Sized with MLX5_ST_SZ_BYTES so each entry can be handed directly to the
 * async command API as in/out buffers.
 */
struct mlx5_virtqueue_query_mem {
	u8 in[MLX5_ST_SZ_BYTES(query_virtio_net_q_in)];
	u8 out[MLX5_ST_SZ_BYTES(query_virtio_net_q_out)];
};
12001191

1201-
cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr);
1192+
static void fill_query_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
1193+
struct mlx5_vdpa_virtqueue *mvq,
1194+
struct mlx5_virtqueue_query_mem *cmd)
1195+
{
1196+
void *cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr);
12021197

12031198
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
12041199
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
12051200
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
12061201
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
1207-
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
1208-
if (err)
1209-
goto err_cmd;
1202+
}
1203+
1204+
static void query_virtqueue_end(struct mlx5_vdpa_net *ndev,
1205+
struct mlx5_virtqueue_query_mem *cmd,
1206+
struct mlx5_virtq_attr *attr)
1207+
{
1208+
void *obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, cmd->out, obj_context);
12101209

1211-
obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context);
12121210
memset(attr, 0, sizeof(*attr));
12131211
attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
12141212
attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
12151213
attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
1216-
kfree(out);
1217-
return 0;
1214+
}
12181215

1219-
err_cmd:
1220-
kfree(out);
1216+
static int query_virtqueues(struct mlx5_vdpa_net *ndev,
1217+
int start_vq,
1218+
int num_vqs,
1219+
struct mlx5_virtq_attr *attrs)
1220+
{
1221+
struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
1222+
struct mlx5_virtqueue_query_mem *cmd_mem;
1223+
struct mlx5_vdpa_async_cmd *cmds;
1224+
int err = 0;
1225+
1226+
WARN(start_vq + num_vqs > mvdev->max_vqs, "query vq range invalid [%d, %d), max_vqs: %u\n",
1227+
start_vq, start_vq + num_vqs, mvdev->max_vqs);
1228+
1229+
cmds = kvcalloc(num_vqs, sizeof(*cmds), GFP_KERNEL);
1230+
cmd_mem = kvcalloc(num_vqs, sizeof(*cmd_mem), GFP_KERNEL);
1231+
if (!cmds || !cmd_mem) {
1232+
err = -ENOMEM;
1233+
goto done;
1234+
}
1235+
1236+
for (int i = 0; i < num_vqs; i++) {
1237+
cmds[i].in = &cmd_mem[i].in;
1238+
cmds[i].inlen = sizeof(cmd_mem[i].in);
1239+
cmds[i].out = &cmd_mem[i].out;
1240+
cmds[i].outlen = sizeof(cmd_mem[i].out);
1241+
fill_query_virtqueue_cmd(ndev, &ndev->vqs[start_vq + i], &cmd_mem[i]);
1242+
}
1243+
1244+
err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs);
1245+
if (err) {
1246+
mlx5_vdpa_err(mvdev, "error issuing query cmd for vq range [%d, %d): %d\n",
1247+
start_vq, start_vq + num_vqs, err);
1248+
goto done;
1249+
}
1250+
1251+
for (int i = 0; i < num_vqs; i++) {
1252+
struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
1253+
int vq_idx = start_vq + i;
1254+
1255+
if (cmd->err) {
1256+
mlx5_vdpa_err(mvdev, "query vq %d failed, err: %d\n", vq_idx, err);
1257+
if (!err)
1258+
err = cmd->err;
1259+
continue;
1260+
}
1261+
1262+
query_virtqueue_end(ndev, &cmd_mem[i], &attrs[i]);
1263+
}
1264+
1265+
done:
1266+
kvfree(cmd_mem);
1267+
kvfree(cmds);
12211268
return err;
12221269
}
12231270

@@ -1542,7 +1589,7 @@ static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mv
15421589
return err;
15431590
}
15441591

1545-
err = query_virtqueue(ndev, mvq, &attr);
1592+
err = query_virtqueues(ndev, mvq->index, 1, &attr);
15461593
if (err) {
15471594
mlx5_vdpa_err(&ndev->mvdev, "failed to query virtqueue, err: %d\n", err);
15481595
return err;
@@ -2528,7 +2575,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
25282575
return 0;
25292576
}
25302577

2531-
err = query_virtqueue(ndev, mvq, &attr);
2578+
err = query_virtqueues(ndev, mvq->index, 1, &attr);
25322579
if (err) {
25332580
mlx5_vdpa_err(mvdev, "failed to query virtqueue\n");
25342581
return err;
@@ -2879,7 +2926,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
28792926
int err;
28802927

28812928
if (mvq->initialized) {
2882-
err = query_virtqueue(ndev, mvq, &attr);
2929+
err = query_virtqueues(ndev, mvq->index, 1, &attr);
28832930
if (err)
28842931
return err;
28852932
}
@@ -3854,6 +3901,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
38543901
ndev->rqt_size = 1;
38553902
}
38563903

3904+
mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx);
3905+
38573906
ndev->mvdev.mlx_features = device_features;
38583907
mvdev->vdev.dma_dev = &mdev->pdev->dev;
38593908
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
@@ -3935,6 +3984,8 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
39353984
mvdev->wq = NULL;
39363985
destroy_workqueue(wq);
39373986
mgtdev->ndev = NULL;
3987+
3988+
mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
39383989
}
39393990

39403991
static int mlx5_vdpa_set_attr(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev,

0 commit comments

Comments
 (0)