Skip to content

Commit 61674c1

Browse files
dtatulea authored and mstsirkin committed
vdpa/mlx5: Use async API for vq modify commands
Switch firmware vq modify command to be issued via the async API to
allow future parallelization. The new refactored function applies the
modify on a range of vqs and waits for their execution to complete.

For now the command is still used in a serial fashion. A later patch
will switch to modifying multiple vqs in parallel.

Signed-off-by: Dragos Tatulea <[email protected]>
Reviewed-by: Tariq Toukan <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Acked-by: Eugenio Pérez <[email protected]>
Tested-by: Lei Yang <[email protected]>
1 parent 1fcdf43 commit 61674c1

File tree

1 file changed

+106
-48
lines changed

1 file changed

+106
-48
lines changed

drivers/vdpa/mlx5/net/mlx5_vnet.c

Lines changed: 106 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -1189,6 +1189,11 @@ struct mlx5_virtqueue_query_mem {
11891189
u8 out[MLX5_ST_SZ_BYTES(query_virtio_net_q_out)];
11901190
};
11911191

1192+
/* Input/output mailbox memory for one firmware modify_virtio_net_q command;
 * one instance is allocated per vq when issuing modifies via the async API.
 */
struct mlx5_virtqueue_modify_mem {
	u8 in[MLX5_ST_SZ_BYTES(modify_virtio_net_q_in)];
	u8 out[MLX5_ST_SZ_BYTES(modify_virtio_net_q_out)];
};
1196+
11921197
static void fill_query_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
11931198
struct mlx5_vdpa_virtqueue *mvq,
11941199
struct mlx5_virtqueue_query_mem *cmd)
@@ -1298,51 +1303,30 @@ static bool modifiable_virtqueue_fields(struct mlx5_vdpa_virtqueue *mvq)
12981303
return true;
12991304
}
13001305

1301-
static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
1302-
struct mlx5_vdpa_virtqueue *mvq,
1303-
int state)
1306+
static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
1307+
struct mlx5_vdpa_virtqueue *mvq,
1308+
int state,
1309+
struct mlx5_virtqueue_modify_mem *cmd)
13041310
{
1305-
int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
1306-
u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
13071311
struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
13081312
struct mlx5_vdpa_mr *desc_mr = NULL;
13091313
struct mlx5_vdpa_mr *vq_mr = NULL;
1310-
bool state_change = false;
13111314
void *obj_context;
13121315
void *cmd_hdr;
13131316
void *vq_ctx;
1314-
void *in;
1315-
int err;
1316-
1317-
if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
1318-
return 0;
1319-
1320-
if (!modifiable_virtqueue_fields(mvq))
1321-
return -EINVAL;
13221317

1323-
in = kzalloc(inlen, GFP_KERNEL);
1324-
if (!in)
1325-
return -ENOMEM;
1326-
1327-
cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr);
1318+
cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr);
13281319

13291320
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
13301321
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
13311322
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
13321323
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
13331324

1334-
obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
1325+
obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, obj_context);
13351326
vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
13361327

1337-
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) {
1338-
if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
1339-
err = -EINVAL;
1340-
goto done;
1341-
}
1342-
1328+
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
13431329
MLX5_SET(virtio_net_q_object, obj_context, state, state);
1344-
state_change = true;
1345-
}
13461330

13471331
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) {
13481332
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
@@ -1388,38 +1372,36 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
13881372
}
13891373

13901374
MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields);
1391-
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
1392-
if (err)
1393-
goto done;
1375+
}
13941376

1395-
if (state_change)
1396-
mvq->fw_state = state;
1377+
/* Apply the driver-side bookkeeping for a successfully completed
 * modify_virtio_net_q command on @mvq: swap in the new memory region
 * references, record the new fw state and clear the pending modify mask.
 * Must only be called after the firmware command for @mvq succeeded.
 */
static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev,
				 struct mlx5_vdpa_virtqueue *mvq,
				 int state)
{
	struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;

	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
		unsigned int asid = mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP];
		struct mlx5_vdpa_mr *vq_mr = mvdev->mr[asid];

		/* Drop the reference on the old vq mr before taking one on
		 * the mr currently mapped for the data vq group.
		 */
		mlx5_vdpa_put_mr(mvdev, mvq->vq_mr);
		mlx5_vdpa_get_mr(mvdev, vq_mr);
		mvq->vq_mr = vq_mr;
	}

	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
		unsigned int asid = mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP];
		struct mlx5_vdpa_mr *desc_mr = mvdev->mr[asid];

		/* Same swap for the descriptor group mr. */
		mlx5_vdpa_put_mr(mvdev, mvq->desc_mr);
		mlx5_vdpa_get_mr(mvdev, desc_mr);
		mvq->desc_mr = desc_mr;
	}

	/* The fw state only changed if a state modify was requested. */
	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
		mvq->fw_state = state;

	mvq->modified_fields = 0;
}
14241406

14251407
static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
@@ -1572,6 +1554,82 @@ static int setup_vq(struct mlx5_vdpa_net *ndev,
15721554
return err;
15731555
}
15741556

1557+
static int modify_virtqueues(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs, int state)
1558+
{
1559+
struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
1560+
struct mlx5_virtqueue_modify_mem *cmd_mem;
1561+
struct mlx5_vdpa_async_cmd *cmds;
1562+
int err = 0;
1563+
1564+
WARN(start_vq + num_vqs > mvdev->max_vqs, "modify vq range invalid [%d, %d), max_vqs: %u\n",
1565+
start_vq, start_vq + num_vqs, mvdev->max_vqs);
1566+
1567+
cmds = kvcalloc(num_vqs, sizeof(*cmds), GFP_KERNEL);
1568+
cmd_mem = kvcalloc(num_vqs, sizeof(*cmd_mem), GFP_KERNEL);
1569+
if (!cmds || !cmd_mem) {
1570+
err = -ENOMEM;
1571+
goto done;
1572+
}
1573+
1574+
for (int i = 0; i < num_vqs; i++) {
1575+
struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
1576+
struct mlx5_vdpa_virtqueue *mvq;
1577+
int vq_idx = start_vq + i;
1578+
1579+
mvq = &ndev->vqs[vq_idx];
1580+
1581+
if (!modifiable_virtqueue_fields(mvq)) {
1582+
err = -EINVAL;
1583+
goto done;
1584+
}
1585+
1586+
if (mvq->fw_state != state) {
1587+
if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
1588+
err = -EINVAL;
1589+
goto done;
1590+
}
1591+
1592+
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE;
1593+
}
1594+
1595+
cmd->in = &cmd_mem[i].in;
1596+
cmd->inlen = sizeof(cmd_mem[i].in);
1597+
cmd->out = &cmd_mem[i].out;
1598+
cmd->outlen = sizeof(cmd_mem[i].out);
1599+
fill_modify_virtqueue_cmd(ndev, mvq, state, &cmd_mem[i]);
1600+
}
1601+
1602+
err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs);
1603+
if (err) {
1604+
mlx5_vdpa_err(mvdev, "error issuing modify cmd for vq range [%d, %d)\n",
1605+
start_vq, start_vq + num_vqs);
1606+
goto done;
1607+
}
1608+
1609+
for (int i = 0; i < num_vqs; i++) {
1610+
struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
1611+
struct mlx5_vdpa_virtqueue *mvq;
1612+
int vq_idx = start_vq + i;
1613+
1614+
mvq = &ndev->vqs[vq_idx];
1615+
1616+
if (cmd->err) {
1617+
mlx5_vdpa_err(mvdev, "modify vq %d failed, state: %d -> %d, err: %d\n",
1618+
vq_idx, mvq->fw_state, state, err);
1619+
if (!err)
1620+
err = cmd->err;
1621+
continue;
1622+
}
1623+
1624+
modify_virtqueue_end(ndev, mvq, state);
1625+
}
1626+
1627+
done:
1628+
kvfree(cmd_mem);
1629+
kvfree(cmds);
1630+
return err;
1631+
}
1632+
15751633
static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
15761634
{
15771635
struct mlx5_virtq_attr attr;
@@ -1583,7 +1641,7 @@ static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mv
15831641
if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
15841642
return 0;
15851643

1586-
err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
1644+
err = modify_virtqueues(ndev, mvq->index, 1, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
15871645
if (err) {
15881646
mlx5_vdpa_err(&ndev->mvdev, "modify to suspend failed, err: %d\n", err);
15891647
return err;
@@ -1630,7 +1688,7 @@ static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
16301688
/* Due to a FW quirk we need to modify the VQ fields first then change state.
16311689
* This should be fixed soon. After that, a single command can be used.
16321690
*/
1633-
err = modify_virtqueue(ndev, mvq, 0);
1691+
err = modify_virtqueues(ndev, mvq->index, 1, mvq->fw_state);
16341692
if (err) {
16351693
mlx5_vdpa_err(&ndev->mvdev,
16361694
"modify vq properties failed for vq %u, err: %d\n",
@@ -1652,7 +1710,7 @@ static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
16521710
return -EINVAL;
16531711
}
16541712

1655-
err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
1713+
err = modify_virtqueues(ndev, mvq->index, 1, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
16561714
if (err)
16571715
mlx5_vdpa_err(&ndev->mvdev, "modify to resume failed for vq %u, err: %d\n",
16581716
mvq->index, err);

0 commit comments

Comments
 (0)