@@ -100,8 +100,15 @@ static void scsi_device_for_each_req_sync(SCSIDevice *s,
     assert(!runstate_is_running());
     assert(qemu_in_main_thread());
 
-    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
-        fn(req, opaque);
+    /*
+     * Locking is not necessary because the guest is stopped and no other
+     * threads can be accessing the requests list, but take the lock for
+     * consistency.
+     */
+    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+        QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
+            fn(req, opaque);
+        }
     }
 }
 
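WITH_QEMU_LOCK_GUARD() releases the mutex automatically when control leaves the braced block. For readers unfamiliar with the macro (defined in include/qemu/lockable.h), here is a minimal standalone approximation of the pattern; the pthread mutex and the WITH_LOCK_GUARD name are stand-ins invented for this sketch, and unlike QEMU's version, which also unlocks on early exit via __attribute__((cleanup)), this one only unlocks on normal scope exit:

/*
 * Minimal sketch of the scoped-lock pattern: a one-iteration for-loop
 * whose "increment" expression releases the mutex, so the critical
 * section is delimited by ordinary braces.
 */
#include <pthread.h>
#include <stdio.h>

#define WITH_LOCK_GUARD(m)                                       \
    for (pthread_mutex_t *guard_ = (pthread_mutex_lock(m), (m)); \
         guard_;                                                 \
         pthread_mutex_unlock(guard_), guard_ = NULL)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

int main(void)
{
    WITH_LOCK_GUARD(&lock) {
        counter++;  /* lock held inside the braces */
    }               /* lock released here */

    printf("counter = %d\n", counter);
    return 0;
}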
@@ -115,21 +122,29 @@ static void scsi_device_for_each_req_async_bh(void *opaque)
 {
     g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
     SCSIDevice *s = data->s;
-    AioContext *ctx;
-    SCSIRequest *req;
-    SCSIRequest *next;
+    g_autoptr(GList) reqs = NULL;
 
     /*
-     * The BB cannot have changed contexts between this BH being scheduled and
-     * now: BBs' AioContexts, when they have a node attached, can only be
-     * changed via bdrv_try_change_aio_context(), in a drained section. While
-     * we have the in-flight counter incremented, that drain must block.
+     * Build a list of requests in this AioContext so fn() can be invoked later
+     * outside requests_lock.
      */
-    ctx = blk_get_aio_context(s->conf.blk);
-    assert(ctx == qemu_get_current_aio_context());
+    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+        AioContext *ctx = qemu_get_current_aio_context();
+        SCSIRequest *req;
+        SCSIRequest *next;
+
+        QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
+            if (req->ctx == ctx) {
+                scsi_req_ref(req); /* dropped after calling fn() */
+                reqs = g_list_prepend(reqs, req);
+            }
+        }
+    }
 
-    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
-        data->fn(req, data->fn_opaque);
+    /* Call fn() on each request */
+    for (GList *elem = g_list_first(reqs); elem; elem = g_list_next(elem)) {
+        data->fn(elem->data, data->fn_opaque);
+        scsi_req_unref(elem->data);
     }
 
     /* Drop the reference taken by scsi_device_for_each_req_async() */
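The ref/unref pairing above is an instance of a common pattern: snapshot the items of interest while holding the lock, taking a reference on each so it cannot be freed, then run the callback with the lock dropped so the callback may itself acquire the lock (for example to dequeue the request). A self-contained sketch of the pattern with GLib; the Req type and its refcounting helpers are invented for illustration:

/*
 * Standalone sketch of the "collect under lock, call back outside the
 * lock" pattern. Req, req_ref() and req_unref() are toy stand-ins for
 * SCSIRequest and its refcounting.
 */
#include <glib.h>

typedef struct {
    gint refcnt;
    int id;
} Req;

static void req_ref(Req *r)
{
    g_atomic_int_inc(&r->refcnt);
}

static void req_unref(Req *r)
{
    if (g_atomic_int_dec_and_test(&r->refcnt)) {
        g_free(r);
    }
}

static GMutex list_lock;
static GList *all_reqs; /* protected by list_lock */

static void for_each_req(void (*fn)(Req *, void *), void *opaque)
{
    g_autoptr(GList) snapshot = NULL;

    /* Phase 1: snapshot the requests while holding the lock */
    g_mutex_lock(&list_lock);
    for (GList *l = all_reqs; l; l = l->next) {
        req_ref(l->data); /* keep it alive for phase 2 */
        snapshot = g_list_prepend(snapshot, l->data);
    }
    g_mutex_unlock(&list_lock);

    /* Phase 2: invoke fn() with no lock held, so fn() may itself take
     * list_lock (e.g. to remove the request) without deadlocking */
    for (GList *l = snapshot; l; l = l->next) {
        fn(l->data, opaque);
        req_unref(l->data);
    }
}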
@@ -139,9 +154,35 @@ static void scsi_device_for_each_req_async_bh(void *opaque)
     blk_dec_in_flight(s->conf.blk);
 }
 
+static void scsi_device_for_each_req_async_do_ctx(gpointer key, gpointer value,
+                                                  gpointer user_data)
+{
+    AioContext *ctx = key;
+    SCSIDeviceForEachReqAsyncData *params = user_data;
+    SCSIDeviceForEachReqAsyncData *data;
+
+    data = g_new(SCSIDeviceForEachReqAsyncData, 1);
+    data->s = params->s;
+    data->fn = params->fn;
+    data->fn_opaque = params->fn_opaque;
+
+    /*
+     * Hold a reference to the SCSIDevice until
+     * scsi_device_for_each_req_async_bh() finishes.
+     */
+    object_ref(OBJECT(data->s));
+
+    /* Paired with scsi_device_for_each_req_async_bh() */
+    blk_inc_in_flight(data->s->conf.blk);
+
+    aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh, data);
+}
+
 /*
  * Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
- * runs in the AioContext that is executing the request.
+ * must be thread-safe because it runs concurrently in each AioContext that is
+ * executing a request.
+ *
  * Keeps the BlockBackend's in-flight counter incremented until everything is
  * done, so draining it will settle all scheduled @fn() calls.
  */
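Because one BH is now scheduled per AioContext, several invocations of @fn() can run at the same time. Anything @fn() touches must therefore be synchronized; a hypothetical callback (not from this patch) that tallies visited requests with C11 atomics:

/* Hypothetical thread-safe @fn() callback: BHs in different AioContexts
 * may invoke it concurrently, so the shared counter uses C11 atomics.
 * SCSIRequest is left opaque in this sketch. */
#include <stdatomic.h>

typedef struct SCSIRequest SCSIRequest;

typedef struct {
    atomic_int count;
} VisitStats;

static void count_one_req(SCSIRequest *req, void *opaque)
{
    VisitStats *stats = opaque;

    (void)req;
    atomic_fetch_add_explicit(&stats->count, 1, memory_order_relaxed);
}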
@@ -151,24 +192,26 @@ static void scsi_device_for_each_req_async(SCSIDevice *s,
 {
     assert(qemu_in_main_thread());
 
-    SCSIDeviceForEachReqAsyncData *data =
-        g_new(SCSIDeviceForEachReqAsyncData, 1);
-
-    data->s = s;
-    data->fn = fn;
-    data->fn_opaque = opaque;
-
-    /*
-     * Hold a reference to the SCSIDevice until
-     * scsi_device_for_each_req_async_bh() finishes.
-     */
-    object_ref(OBJECT(s));
+    /* The set of AioContexts where the requests are being processed */
+    g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL);
+    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+        SCSIRequest *req;
+        QTAILQ_FOREACH(req, &s->requests, next) {
+            g_hash_table_add(aio_contexts, req->ctx);
+        }
+    }
 
-    /* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */
-    blk_inc_in_flight(s->conf.blk);
-    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
-                            scsi_device_for_each_req_async_bh,
-                            data);
+    /* Schedule a BH for each AioContext */
+    SCSIDeviceForEachReqAsyncData params = {
+        .s = s,
+        .fn = fn,
+        .fn_opaque = opaque,
+    };
+    g_hash_table_foreach(
+        aio_contexts,
+        scsi_device_for_each_req_async_do_ctx,
+        &params
+    );
 }
 
 static void scsi_device_realize(SCSIDevice *s, Error **errp)
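g_hash_table_new(NULL, NULL) creates a table with the default g_direct_hash()/g_direct_equal() functions, so keys are compared by pointer value and the table acts as a set of pointers: adding the same AioContext twice leaves a single entry. A standalone demo of that behavior:

/* GHashTable as a pointer set: duplicates collapse into one entry. */
#include <glib.h>
#include <stdio.h>

static void print_key(gpointer key, gpointer value, gpointer user_data)
{
    (void)value;
    (void)user_data;
    printf("key %p\n", key);
}

int main(void)
{
    g_autoptr(GHashTable) set = g_hash_table_new(NULL, NULL);
    int a, b;

    g_hash_table_add(set, &a);
    g_hash_table_add(set, &b);
    g_hash_table_add(set, &a); /* duplicate key: size stays 2 */

    printf("%u distinct keys\n", g_hash_table_size(set));
    g_hash_table_foreach(set, print_key, NULL);
    return 0;
}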
@@ -349,6 +392,7 @@ static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
         dev->lun = lun;
     }
 
+    qemu_mutex_init(&dev->requests_lock);
     QTAILQ_INIT(&dev->requests);
     scsi_device_realize(dev, &local_err);
     if (local_err) {
@@ -369,6 +413,8 @@ static void scsi_qdev_unrealize(DeviceState *qdev)
 
     scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));
 
+    qemu_mutex_destroy(&dev->requests_lock);
+
     scsi_device_unrealize(dev);
 
     blockdev_mark_auto_del(dev->conf.blk);
@@ -965,7 +1011,10 @@ static void scsi_req_enqueue_internal(SCSIRequest *req)
         req->sg = NULL;
     }
     req->enqueued = true;
-    QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
+
+    WITH_QEMU_LOCK_GUARD(&req->dev->requests_lock) {
+        QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
+    }
 }
 
 int32_t scsi_req_enqueue(SCSIRequest *req)
@@ -985,7 +1034,9 @@ static void scsi_req_dequeue(SCSIRequest *req)
     trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
     req->retry = false;
     if (req->enqueued) {
-        QTAILQ_REMOVE(&req->dev->requests, req, next);
+        WITH_QEMU_LOCK_GUARD(&req->dev->requests_lock) {
+            QTAILQ_REMOVE(&req->dev->requests, req, next);
+        }
         req->enqueued = false;
         scsi_req_unref(req);
     }
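For reference, the QTAILQ_* macros come from include/qemu/queue.h, QEMU's copy of the BSD sys/queue.h tail-queue macros; QTAILQ_INSERT_TAIL() and QTAILQ_REMOVE() are O(1) because each element carries both forward and backward links. A standalone sketch with the system <sys/queue.h> equivalents (the Req type is made up):

/*
 * Standalone sketch of the tail-queue usage above, using the BSD-style
 * TAILQ macros from <sys/queue.h>; QEMU's QTAILQ_* macros behave the
 * same way.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Req {
    int tag;
    TAILQ_ENTRY(Req) next; /* like QTAILQ_ENTRY(SCSIRequest) next */
} Req;

int main(void)
{
    TAILQ_HEAD(, Req) requests = TAILQ_HEAD_INITIALIZER(requests);
    Req *r;

    for (int i = 0; i < 3; i++) {
        r = malloc(sizeof(*r));
        r->tag = i;
        TAILQ_INSERT_TAIL(&requests, r, next); /* enqueue */
    }

    TAILQ_FOREACH(r, &requests, next) {
        printf("tag %d\n", r->tag);
    }

    while ((r = TAILQ_FIRST(&requests)) != NULL) {
        TAILQ_REMOVE(&requests, r, next); /* dequeue */
        free(r);
    }
    return 0;
}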
@@ -1962,8 +2013,7 @@ static void scsi_device_class_init(ObjectClass *klass, void *data)
 
 static void scsi_dev_instance_init(Object *obj)
 {
-    DeviceState *dev = DEVICE(obj);
-    SCSIDevice *s = SCSI_DEVICE(dev);
+    SCSIDevice *s = SCSI_DEVICE(obj);
 
     device_add_bootindex_property(obj, &s->conf.bootindex,
                                   "bootindex", NULL,