Skip to content

Commit 4023784

Browse files
jasowang authored and mst committed
vhost-vdpa: multiqueue support
This patch implements the multiqueue support for vhost-vdpa. This is done
simply by reading the number of queue pairs from the config space and
initializing the datapath and control path net clients.

Signed-off-by: Jason Wang <[email protected]>
Message-Id: <[email protected]>
Reviewed-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
1 parent 22288fe commit 4023784

File tree

2 files changed

+97
-10
lines changed

2 files changed

+97
-10
lines changed

hw/virtio/vhost-vdpa.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -632,7 +632,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
632632
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
633633
}
634634

635-
if (vhost_vdpa_one_time_request(dev)) {
635+
if (dev->vq_index + dev->nvqs != dev->last_index) {
636636
return 0;
637637
}
638638

net/vhost-vdpa.c

Lines changed: 96 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
#include "qemu/error-report.h"
1919
#include "qemu/option.h"
2020
#include "qapi/error.h"
21+
#include <linux/vhost.h>
2122
#include <sys/ioctl.h>
2223
#include <err.h>
2324
#include "standard-headers/linux/virtio_net.h"
@@ -51,6 +52,14 @@ const int vdpa_feature_bits[] = {
5152
VIRTIO_NET_F_HOST_UFO,
5253
VIRTIO_NET_F_MRG_RXBUF,
5354
VIRTIO_NET_F_MTU,
55+
VIRTIO_NET_F_CTRL_RX,
56+
VIRTIO_NET_F_CTRL_RX_EXTRA,
57+
VIRTIO_NET_F_CTRL_VLAN,
58+
VIRTIO_NET_F_GUEST_ANNOUNCE,
59+
VIRTIO_NET_F_CTRL_MAC_ADDR,
60+
VIRTIO_NET_F_RSS,
61+
VIRTIO_NET_F_MQ,
62+
VIRTIO_NET_F_CTRL_VQ,
5463
VIRTIO_F_IOMMU_PLATFORM,
5564
VIRTIO_F_RING_PACKED,
5665
VIRTIO_NET_F_RSS,
@@ -81,7 +90,8 @@ static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
8190
return ret;
8291
}
8392

84-
static int vhost_vdpa_add(NetClientState *ncs, void *be)
93+
static int vhost_vdpa_add(NetClientState *ncs, void *be,
94+
int queue_pair_index, int nvqs)
8595
{
8696
VhostNetOptions options;
8797
struct vhost_net *net = NULL;
@@ -94,7 +104,7 @@ static int vhost_vdpa_add(NetClientState *ncs, void *be)
94104
options.net_backend = ncs;
95105
options.opaque = be;
96106
options.busyloop_timeout = 0;
97-
options.nvqs = 2;
107+
options.nvqs = nvqs;
98108

99109
net = vhost_net_init(&options);
100110
if (!net) {
@@ -172,31 +182,81 @@ static NetClientInfo net_vhost_vdpa_info = {
172182
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
173183
const char *device,
174184
const char *name,
175-
int vdpa_device_fd)
185+
int vdpa_device_fd,
186+
int queue_pair_index,
187+
int nvqs,
188+
bool is_datapath)
176189
{
177190
NetClientState *nc = NULL;
178191
VhostVDPAState *s;
179192
int ret = 0;
180193
assert(name);
181-
nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device, name);
194+
if (is_datapath) {
195+
nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
196+
name);
197+
} else {
198+
nc = qemu_new_net_control_client(&net_vhost_vdpa_info, peer,
199+
device, name);
200+
}
182201
snprintf(nc->info_str, sizeof(nc->info_str), TYPE_VHOST_VDPA);
183202
s = DO_UPCAST(VhostVDPAState, nc, nc);
184203

185204
s->vhost_vdpa.device_fd = vdpa_device_fd;
186-
ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa);
205+
s->vhost_vdpa.index = queue_pair_index;
206+
ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
187207
if (ret) {
188208
qemu_del_net_client(nc);
189209
return NULL;
190210
}
191211
return nc;
192212
}
193213

214+
static int vhost_vdpa_get_max_queue_pairs(int fd, int *has_cvq, Error **errp)
215+
{
216+
unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
217+
struct vhost_vdpa_config *config;
218+
__virtio16 *max_queue_pairs;
219+
uint64_t features;
220+
int ret;
221+
222+
ret = ioctl(fd, VHOST_GET_FEATURES, &features);
223+
if (ret) {
224+
error_setg(errp, "Fail to query features from vhost-vDPA device");
225+
return ret;
226+
}
227+
228+
if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
229+
*has_cvq = 1;
230+
} else {
231+
*has_cvq = 0;
232+
}
233+
234+
if (features & (1 << VIRTIO_NET_F_MQ)) {
235+
config = g_malloc0(config_size + sizeof(*max_queue_pairs));
236+
config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
237+
config->len = sizeof(*max_queue_pairs);
238+
239+
ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
240+
if (ret) {
241+
error_setg(errp, "Fail to get config from vhost-vDPA device");
242+
return -ret;
243+
}
244+
245+
max_queue_pairs = (__virtio16 *)&config->buf;
246+
247+
return lduw_le_p(max_queue_pairs);
248+
}
249+
250+
return 1;
251+
}
252+
194253
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
195254
NetClientState *peer, Error **errp)
196255
{
197256
const NetdevVhostVDPAOptions *opts;
198257
int vdpa_device_fd;
199-
NetClientState *nc;
258+
NetClientState **ncs, *nc;
259+
int queue_pairs, i, has_cvq = 0;
200260

201261
assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
202262
opts = &netdev->u.vhost_vdpa;
@@ -206,11 +266,38 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
206266
return -errno;
207267
}
208268

209-
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, vdpa_device_fd);
210-
if (!nc) {
269+
queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd,
270+
&has_cvq, errp);
271+
if (queue_pairs < 0) {
211272
qemu_close(vdpa_device_fd);
212-
return -1;
273+
return queue_pairs;
274+
}
275+
276+
ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
277+
278+
for (i = 0; i < queue_pairs; i++) {
279+
ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
280+
vdpa_device_fd, i, 2, true);
281+
if (!ncs[i])
282+
goto err;
213283
}
214284

285+
if (has_cvq) {
286+
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
287+
vdpa_device_fd, i, 1, false);
288+
if (!nc)
289+
goto err;
290+
}
291+
292+
g_free(ncs);
215293
return 0;
294+
295+
err:
296+
if (i) {
297+
qemu_del_net_client(ncs[0]);
298+
}
299+
qemu_close(vdpa_device_fd);
300+
g_free(ncs);
301+
302+
return -1;
216303
}

0 commit comments

Comments
 (0)