Skip to content

Commit a284f09

Browse files
mikechristie and mstsirkin
authored and committed
vhost: Fix crash during early vhost_transport_send_pkt calls
If userspace does VHOST_VSOCK_SET_GUEST_CID before VHOST_SET_OWNER we can race where: 1. thread0 calls vhost_transport_send_pkt -> vhost_work_queue 2. thread1 does VHOST_SET_OWNER which calls vhost_worker_create. 3. vhost_worker_create will set the dev->worker pointer before setting the worker->vtsk pointer. 4. thread0's vhost_work_queue will see the dev->worker pointer is set and try to call vhost_task_wake using not yet set worker->vtsk pointer. 5. We then crash since vtsk is NULL. Before commit 6e890c5 ("vhost: use vhost_tasks for worker threads"), we only had the worker pointer so we could just check it to see if VHOST_SET_OWNER has been done. After that commit we have the vhost_worker and vhost_task pointer, so we can now hit the bug above. This patch embeds the vhost_worker in the vhost_dev and moves the work list initialization back to vhost_dev_init, so we can just check the worker.vtsk pointer to check if VHOST_SET_OWNER has been done like before. Fixes: 6e890c5 ("vhost: use vhost_tasks for worker threads") Signed-off-by: Mike Christie <[email protected]> Message-Id: <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]> Reported-by: [email protected] Reviewed-by: Stefano Garzarella <[email protected]>
1 parent 1f5d2e3 commit a284f09

File tree

2 files changed

+18
-34
lines changed

2 files changed

+18
-34
lines changed

drivers/vhost/vhost.c

Lines changed: 17 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -235,7 +235,7 @@ void vhost_dev_flush(struct vhost_dev *dev)
235235
{
236236
struct vhost_flush_struct flush;
237237

238-
if (dev->worker) {
238+
if (dev->worker.vtsk) {
239239
init_completion(&flush.wait_event);
240240
vhost_work_init(&flush.work, vhost_flush_work);
241241

@@ -247,24 +247,24 @@ EXPORT_SYMBOL_GPL(vhost_dev_flush);
247247

248248
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
249249
{
250-
if (!dev->worker)
250+
if (!dev->worker.vtsk)
251251
return;
252252

253253
if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
254254
/* We can only add the work to the list after we're
255255
* sure it was not in the list.
256256
* test_and_set_bit() implies a memory barrier.
257257
*/
258-
llist_add(&work->node, &dev->worker->work_list);
259-
vhost_task_wake(dev->worker->vtsk);
258+
llist_add(&work->node, &dev->worker.work_list);
259+
vhost_task_wake(dev->worker.vtsk);
260260
}
261261
}
262262
EXPORT_SYMBOL_GPL(vhost_work_queue);
263263

264264
/* A lockless hint for busy polling code to exit the loop */
265265
bool vhost_has_work(struct vhost_dev *dev)
266266
{
267-
return dev->worker && !llist_empty(&dev->worker->work_list);
267+
return !llist_empty(&dev->worker.work_list);
268268
}
269269
EXPORT_SYMBOL_GPL(vhost_has_work);
270270

@@ -456,7 +456,8 @@ void vhost_dev_init(struct vhost_dev *dev,
456456
dev->umem = NULL;
457457
dev->iotlb = NULL;
458458
dev->mm = NULL;
459-
dev->worker = NULL;
459+
memset(&dev->worker, 0, sizeof(dev->worker));
460+
init_llist_head(&dev->worker.work_list);
460461
dev->iov_limit = iov_limit;
461462
dev->weight = weight;
462463
dev->byte_weight = byte_weight;
@@ -530,47 +531,30 @@ static void vhost_detach_mm(struct vhost_dev *dev)
530531

531532
static void vhost_worker_free(struct vhost_dev *dev)
532533
{
533-
struct vhost_worker *worker = dev->worker;
534-
535-
if (!worker)
534+
if (!dev->worker.vtsk)
536535
return;
537536

538-
dev->worker = NULL;
539-
WARN_ON(!llist_empty(&worker->work_list));
540-
vhost_task_stop(worker->vtsk);
541-
kfree(worker);
537+
WARN_ON(!llist_empty(&dev->worker.work_list));
538+
vhost_task_stop(dev->worker.vtsk);
539+
dev->worker.kcov_handle = 0;
540+
dev->worker.vtsk = NULL;
542541
}
543542

544543
static int vhost_worker_create(struct vhost_dev *dev)
545544
{
546-
struct vhost_worker *worker;
547545
struct vhost_task *vtsk;
548546
char name[TASK_COMM_LEN];
549-
int ret;
550-
551-
worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
552-
if (!worker)
553-
return -ENOMEM;
554547

555-
dev->worker = worker;
556-
worker->kcov_handle = kcov_common_handle();
557-
init_llist_head(&worker->work_list);
558548
snprintf(name, sizeof(name), "vhost-%d", current->pid);
559549

560-
vtsk = vhost_task_create(vhost_worker, worker, name);
561-
if (!vtsk) {
562-
ret = -ENOMEM;
563-
goto free_worker;
564-
}
550+
vtsk = vhost_task_create(vhost_worker, &dev->worker, name);
551+
if (!vtsk)
552+
return -ENOMEM;
565553

566-
worker->vtsk = vtsk;
554+
dev->worker.kcov_handle = kcov_common_handle();
555+
dev->worker.vtsk = vtsk;
567556
vhost_task_start(vtsk);
568557
return 0;
569-
570-
free_worker:
571-
kfree(worker);
572-
dev->worker = NULL;
573-
return ret;
574558
}
575559

576560
/* Caller should have device mutex */

drivers/vhost/vhost.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,7 @@ struct vhost_dev {
154154
struct vhost_virtqueue **vqs;
155155
int nvqs;
156156
struct eventfd_ctx *log_ctx;
157-
struct vhost_worker *worker;
157+
struct vhost_worker worker;
158158
struct vhost_iotlb *umem;
159159
struct vhost_iotlb *iotlb;
160160
spinlock_t iotlb_lock;

0 commit comments

Comments (0)