
Commit b348ca2

stefanhaRH authored and kevmw committed
virtio-scsi: introduce event and ctrl virtqueue locks
Virtqueues are not thread-safe. Until now this was not a major issue since all virtqueue processing happened in the same thread. The ctrl queue's Task Management Function (TMF) requests sometimes need the main loop, so a BH was used to schedule the virtqueue completion back in the thread that has virtqueue access.

When IOThread Virtqueue Mapping is introduced in later commits, event and ctrl virtqueue accesses from other threads will become necessary. Introduce an optional per-virtqueue lock so the event and ctrl virtqueues can be protected in the commits that follow.

The addition of the ctrl virtqueue lock makes virtio_scsi_complete_req_from_main_loop() and its BH unnecessary. Instead, take the ctrl virtqueue lock from the main loop thread.

The cmd virtqueue does not have a lock because the entirety of SCSI command processing happens in one thread. Only one thread accesses the cmd virtqueue and a lock is unnecessary.

Signed-off-by: Stefan Hajnoczi <[email protected]>
Reviewed-by: Kevin Wolf <[email protected]>
Message-ID: <[email protected]>
Tested-by: Peter Krempa <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]>
1 parent 1cf18cc commit b348ca2
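For readers skimming the diff, here is a minimal stand-alone sketch of the optional-lock idiom this patch introduces. It uses hypothetical names and plain pthreads rather than QEMU's QemuMutex and virtqueue helpers: callers whose virtqueue may be touched by more than one thread pass the queue's lock, while the single-threaded cmd path passes NULL and skips locking entirely.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for virtio_scsi_complete_req(req, vq_lock): the lock argument is
 * optional, so queues handled by a single thread pay no locking cost. */
static void complete_request(int req_id, pthread_mutex_t *vq_lock)
{
    if (vq_lock) {
        pthread_mutex_lock(vq_lock);   /* ctrl/event queues: other threads may access the queue */
    }

    /* stands in for virtqueue_push() + guest notification */
    printf("pushing request %d onto the virtqueue\n", req_id);

    if (vq_lock) {
        pthread_mutex_unlock(vq_lock);
    }
}

int main(void)
{
    pthread_mutex_t ctrl_lock = PTHREAD_MUTEX_INITIALIZER;

    complete_request(1, &ctrl_lock);   /* ctrl queue: serialized against other threads */
    complete_request(2, NULL);         /* cmd queue: one thread only, no lock needed */
    return 0;
}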

File tree

2 files changed: +49 -38 lines changed


hw/scsi/virtio-scsi.c

Lines changed: 46 additions & 38 deletions
@@ -102,55 +102,50 @@ static void virtio_scsi_free_req(VirtIOSCSIReq *req)
     g_free(req);
 }
 
-static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
+static void virtio_scsi_complete_req(VirtIOSCSIReq *req, QemuMutex *vq_lock)
 {
     VirtIOSCSI *s = req->dev;
     VirtQueue *vq = req->vq;
     VirtIODevice *vdev = VIRTIO_DEVICE(s);
 
     qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
+
+    if (vq_lock) {
+        qemu_mutex_lock(vq_lock);
+    }
+
     virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
     if (s->dataplane_started && !s->dataplane_fenced) {
         virtio_notify_irqfd(vdev, vq);
     } else {
         virtio_notify(vdev, vq);
     }
 
+    if (vq_lock) {
+        qemu_mutex_unlock(vq_lock);
+    }
+
     if (req->sreq) {
         req->sreq->hba_private = NULL;
         scsi_req_unref(req->sreq);
     }
     virtio_scsi_free_req(req);
 }
 
-static void virtio_scsi_complete_req_bh(void *opaque)
+static void virtio_scsi_bad_req(VirtIOSCSIReq *req, QemuMutex *vq_lock)
 {
-    VirtIOSCSIReq *req = opaque;
+    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
 
-    virtio_scsi_complete_req(req);
-}
+    if (vq_lock) {
+        qemu_mutex_lock(vq_lock);
+    }
 
-/*
- * Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop
- * thread cannot touch the virtqueue since that could race with an IOThread.
- */
-static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req)
-{
-    VirtIOSCSI *s = req->dev;
+    virtqueue_detach_element(req->vq, &req->elem, 0);
 
-    if (!s->ctx || s->ctx == qemu_get_aio_context()) {
-        /* No need to schedule a BH when there is no IOThread */
-        virtio_scsi_complete_req(req);
-    } else {
-        /* Run request completion in the IOThread */
-        aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req);
+    if (vq_lock) {
+        qemu_mutex_unlock(vq_lock);
     }
-}
 
-static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
-{
-    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
-    virtqueue_detach_element(req->vq, &req->elem, 0);
     virtio_scsi_free_req(req);
 }
 
@@ -235,12 +230,21 @@ static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
     return 0;
 }
 
-static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
+static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq, QemuMutex *vq_lock)
 {
     VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
     VirtIOSCSIReq *req;
 
+    if (vq_lock) {
+        qemu_mutex_lock(vq_lock);
+    }
+
     req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
+
+    if (vq_lock) {
+        qemu_mutex_unlock(vq_lock);
+    }
+
     if (!req) {
         return NULL;
     }
@@ -305,7 +309,7 @@ static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
 
         trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                    req->req.tmf.tag, req->resp.tmf.response);
-        virtio_scsi_complete_req(req);
+        virtio_scsi_complete_req(req, &req->dev->ctrl_lock);
     }
     g_free(n);
 }
@@ -361,7 +365,7 @@ static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
 
 out:
     object_unref(OBJECT(d));
-    virtio_scsi_complete_req_from_main_loop(req);
+    virtio_scsi_complete_req(req, &s->ctrl_lock);
 }
 
 /* Some TMFs must be processed from the main loop thread */
@@ -408,7 +412,7 @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
 
         /* SAM-6 6.3.2 Hard reset */
         req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
-        virtio_scsi_complete_req(req);
+        virtio_scsi_complete_req(req, &req->dev->ctrl_lock);
     }
 }
 
@@ -562,15 +566,15 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
 
     if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                    &type, sizeof(type)) < sizeof(type)) {
-        virtio_scsi_bad_req(req);
+        virtio_scsi_bad_req(req, &s->ctrl_lock);
         return;
     }
 
     virtio_tswap32s(vdev, &type);
     if (type == VIRTIO_SCSI_T_TMF) {
         if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                                   sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
-            virtio_scsi_bad_req(req);
+            virtio_scsi_bad_req(req, &s->ctrl_lock);
             return;
         } else {
             r = virtio_scsi_do_tmf(s, req);
@@ -580,7 +584,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
                type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
         if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                                   sizeof(VirtIOSCSICtrlANResp)) < 0) {
-            virtio_scsi_bad_req(req);
+            virtio_scsi_bad_req(req, &s->ctrl_lock);
             return;
         } else {
             req->req.an.event_requested =
@@ -600,7 +604,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
             type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
             trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
                                       req->resp.an.response);
-        virtio_scsi_complete_req(req);
+        virtio_scsi_complete_req(req, &s->ctrl_lock);
     } else {
         assert(r == -EINPROGRESS);
     }
@@ -610,7 +614,7 @@ static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
 {
     VirtIOSCSIReq *req;
 
-    while ((req = virtio_scsi_pop_req(s, vq))) {
+    while ((req = virtio_scsi_pop_req(s, vq, &s->ctrl_lock))) {
         virtio_scsi_handle_ctrl_req(s, req);
     }
 }
@@ -654,7 +658,7 @@ static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
      * in virtio_scsi_command_complete.
      */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
-    virtio_scsi_complete_req(req);
+    virtio_scsi_complete_req(req, NULL);
 }
 
 static void virtio_scsi_command_failed(SCSIRequest *r)
@@ -788,7 +792,7 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
             virtio_scsi_fail_cmd_req(req);
             return -ENOTSUP;
         } else {
-            virtio_scsi_bad_req(req);
+            virtio_scsi_bad_req(req, NULL);
             return -EINVAL;
         }
     }
@@ -843,7 +847,7 @@ static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
         virtio_queue_set_notification(vq, 0);
     }
 
-    while ((req = virtio_scsi_pop_req(s, vq))) {
+    while ((req = virtio_scsi_pop_req(s, vq, NULL))) {
         ret = virtio_scsi_handle_cmd_req_prepare(s, req);
         if (!ret) {
             QTAILQ_INSERT_TAIL(&reqs, req, next);
@@ -973,7 +977,7 @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
         return;
     }
 
-    req = virtio_scsi_pop_req(s, vs->event_vq);
+    req = virtio_scsi_pop_req(s, vs->event_vq, &s->event_lock);
     if (!req) {
         s->events_dropped = true;
         return;
@@ -985,7 +989,7 @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
     }
 
     if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
-        virtio_scsi_bad_req(req);
+        virtio_scsi_bad_req(req, &s->event_lock);
         return;
     }
 
@@ -1005,7 +1009,7 @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
     }
     trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);
 
-    virtio_scsi_complete_req(req);
+    virtio_scsi_complete_req(req, &s->event_lock);
 }
 
 static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
@@ -1236,6 +1240,8 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
     Error *err = NULL;
 
     QTAILQ_INIT(&s->tmf_bh_list);
+    qemu_mutex_init(&s->ctrl_lock);
+    qemu_mutex_init(&s->event_lock);
     qemu_mutex_init(&s->tmf_bh_lock);
 
     virtio_scsi_common_realize(dev,
@@ -1280,6 +1286,8 @@ static void virtio_scsi_device_unrealize(DeviceState *dev)
     qbus_set_hotplug_handler(BUS(&s->bus), NULL);
     virtio_scsi_common_unrealize(dev);
     qemu_mutex_destroy(&s->tmf_bh_lock);
+    qemu_mutex_destroy(&s->event_lock);
+    qemu_mutex_destroy(&s->ctrl_lock);
 }
 
 static const Property virtio_scsi_properties[] = {

include/hw/virtio/virtio-scsi.h

Lines changed: 3 additions & 0 deletions
@@ -84,6 +84,9 @@ struct VirtIOSCSI {
     int resetting; /* written from main loop thread, read from any thread */
     bool events_dropped;
 
+    QemuMutex ctrl_lock; /* protects ctrl_vq */
+    QemuMutex event_lock; /* protects event_vq */
+
     /*
      * TMFs deferred to main loop BH. These fields are protected by
      * tmf_bh_lock.
