Skip to content

Commit 17e1e49

Browse files
committed
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Pull request: Fixes for QEMU on aarch64 ARM hosts and fdmon-io_uring.

# gpg: Signature made Thu 09 Apr 2020 18:42:01 BST
# gpg: using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <[email protected]>" [full]
# gpg: aka "Stefan Hajnoczi <[email protected]>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  async: use explicit memory barriers
  aio-wait: delegate polling of main AioContext if BQL not held
  aio-posix: signal-proof fdmon-io_uring

Signed-off-by: Peter Maydell <[email protected]>
2 parents 8bac3ba + 5710a3e commit 17e1e49

File tree

6 files changed

+80
-30
lines changed

6 files changed

+80
-30
lines changed

include/block/aio-wait.h

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
#define QEMU_AIO_WAIT_H
2727

2828
#include "block/aio.h"
29+
#include "qemu/main-loop.h"
2930

3031
/**
3132
* AioWait:
@@ -124,4 +125,25 @@ void aio_wait_kick(void);
124125
*/
125126
void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
126127

128+
/**
129+
* in_aio_context_home_thread:
130+
* @ctx: the aio context
131+
*
132+
* Return whether we are running in the thread that normally runs @ctx. Note
133+
* that acquiring/releasing ctx does not affect the outcome, each AioContext
134+
* still only has one home thread that is responsible for running it.
135+
*/
136+
static inline bool in_aio_context_home_thread(AioContext *ctx)
137+
{
138+
if (ctx == qemu_get_current_aio_context()) {
139+
return true;
140+
}
141+
142+
if (ctx == qemu_get_aio_context()) {
143+
return qemu_mutex_iothread_locked();
144+
} else {
145+
return false;
146+
}
147+
}
148+
127149
#endif /* QEMU_AIO_WAIT_H */

include/block/aio.h

Lines changed: 10 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -133,12 +133,16 @@ struct AioContext {
133133
AioHandlerList deleted_aio_handlers;
134134

135135
/* Used to avoid unnecessary event_notifier_set calls in aio_notify;
136-
* accessed with atomic primitives. If this field is 0, everything
137-
* (file descriptors, bottom halves, timers) will be re-evaluated
138-
* before the next blocking poll(), thus the event_notifier_set call
139-
* can be skipped. If it is non-zero, you may need to wake up a
140-
* concurrent aio_poll or the glib main event loop, making
141-
* event_notifier_set necessary.
136+
* only written from the AioContext home thread, or under the BQL in
137+
* the case of the main AioContext. However, it is read from any
138+
* thread so it is still accessed with atomic primitives.
139+
*
140+
* If this field is 0, everything (file descriptors, bottom halves,
141+
* timers) will be re-evaluated before the next blocking poll() or
142+
* io_uring wait; therefore, the event_notifier_set call can be
143+
* skipped. If it is non-zero, you may need to wake up a concurrent
144+
* aio_poll or the glib main event loop, making event_notifier_set
145+
* necessary.
142146
*
143147
* Bit 0 is reserved for GSource usage of the AioContext, and is 1
144148
* between a call to aio_ctx_prepare and the next call to aio_ctx_check.
@@ -681,19 +685,6 @@ void aio_co_enter(AioContext *ctx, struct Coroutine *co);
681685
*/
682686
AioContext *qemu_get_current_aio_context(void);
683687

684-
/**
685-
* in_aio_context_home_thread:
686-
* @ctx: the aio context
687-
*
688-
* Return whether we are running in the thread that normally runs @ctx. Note
689-
* that acquiring/releasing ctx does not affect the outcome, each AioContext
690-
* still only has one home thread that is responsible for running it.
691-
*/
692-
static inline bool in_aio_context_home_thread(AioContext *ctx)
693-
{
694-
return ctx == qemu_get_current_aio_context();
695-
}
696-
697688
/**
698689
* aio_context_setup:
699690
* @ctx: the aio context

util/aio-posix.c

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -559,6 +559,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
559559
int64_t timeout;
560560
int64_t start = 0;
561561

562+
/*
563+
* There cannot be two concurrent aio_poll calls for the same AioContext (or
564+
* an aio_poll concurrent with a GSource prepare/check/dispatch callback).
565+
* We rely on this below to avoid slow locked accesses to ctx->notify_me.
566+
*/
562567
assert(in_aio_context_home_thread(ctx));
563568

564569
/* aio_notify can avoid the expensive event_notifier_set if
@@ -569,7 +574,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
569574
* so disable the optimization now.
570575
*/
571576
if (blocking) {
572-
atomic_add(&ctx->notify_me, 2);
577+
atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
578+
/*
579+
* Write ctx->notify_me before computing the timeout
580+
* (reading bottom half flags, etc.). Pairs with
581+
* smp_mb in aio_notify().
582+
*/
583+
smp_mb();
573584
}
574585

575586
qemu_lockcnt_inc(&ctx->list_lock);
@@ -590,7 +601,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
590601
}
591602

592603
if (blocking) {
593-
atomic_sub(&ctx->notify_me, 2);
604+
/* Finish the poll before clearing the flag. */
605+
atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
594606
aio_notify_accept(ctx);
595607
}
596608

util/aio-win32.c

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -321,6 +321,12 @@ bool aio_poll(AioContext *ctx, bool blocking)
321321
int count;
322322
int timeout;
323323

324+
/*
325+
* There cannot be two concurrent aio_poll calls for the same AioContext (or
326+
* an aio_poll concurrent with a GSource prepare/check/dispatch callback).
327+
* We rely on this below to avoid slow locked accesses to ctx->notify_me.
328+
*/
329+
assert(in_aio_context_home_thread(ctx));
324330
progress = false;
325331

326332
/* aio_notify can avoid the expensive event_notifier_set if
@@ -331,7 +337,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
331337
* so disable the optimization now.
332338
*/
333339
if (blocking) {
334-
atomic_add(&ctx->notify_me, 2);
340+
atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
341+
/*
342+
* Write ctx->notify_me before computing the timeout
343+
* (reading bottom half flags, etc.). Pairs with
344+
* smp_mb in aio_notify().
345+
*/
346+
smp_mb();
335347
}
336348

337349
qemu_lockcnt_inc(&ctx->list_lock);
@@ -364,8 +376,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
364376
ret = WaitForMultipleObjects(count, events, FALSE, timeout);
365377
if (blocking) {
366378
assert(first);
367-
assert(in_aio_context_home_thread(ctx));
368-
atomic_sub(&ctx->notify_me, 2);
379+
atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
369380
aio_notify_accept(ctx);
370381
}
371382

util/async.c

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -249,7 +249,14 @@ aio_ctx_prepare(GSource *source, gint *timeout)
249249
{
250250
AioContext *ctx = (AioContext *) source;
251251

252-
atomic_or(&ctx->notify_me, 1);
252+
atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) | 1);
253+
254+
/*
255+
* Write ctx->notify_me before computing the timeout
256+
* (reading bottom half flags, etc.). Pairs with
257+
* smp_mb in aio_notify().
258+
*/
259+
smp_mb();
253260

254261
/* We assume there is no timeout already supplied */
255262
*timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
@@ -268,7 +275,8 @@ aio_ctx_check(GSource *source)
268275
QEMUBH *bh;
269276
BHListSlice *s;
270277

271-
atomic_and(&ctx->notify_me, ~1);
278+
/* Finish computing the timeout before clearing the flag. */
279+
atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) & ~1);
272280
aio_notify_accept(ctx);
273281

274282
QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
@@ -411,10 +419,10 @@ LuringState *aio_get_linux_io_uring(AioContext *ctx)
411419
void aio_notify(AioContext *ctx)
412420
{
413421
/* Write e.g. bh->scheduled before reading ctx->notify_me. Pairs
414-
* with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
422+
* with smp_mb in aio_ctx_prepare or aio_poll.
415423
*/
416424
smp_mb();
417-
if (ctx->notify_me) {
425+
if (atomic_read(&ctx->notify_me)) {
418426
event_notifier_set(&ctx->notifier);
419427
atomic_mb_set(&ctx->notified, true);
420428
}

util/fdmon-io_uring.c

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,10 @@ static struct io_uring_sqe *get_sqe(AioContext *ctx)
8888
}
8989

9090
/* No free sqes left, submit pending sqes first */
91-
ret = io_uring_submit(ring);
91+
do {
92+
ret = io_uring_submit(ring);
93+
} while (ret == -EINTR);
94+
9295
assert(ret > 1);
9396
sqe = io_uring_get_sqe(ring);
9497
assert(sqe);
@@ -282,7 +285,10 @@ static int fdmon_io_uring_wait(AioContext *ctx, AioHandlerList *ready_list,
282285

283286
fill_sq_ring(ctx);
284287

285-
ret = io_uring_submit_and_wait(&ctx->fdmon_io_uring, wait_nr);
288+
do {
289+
ret = io_uring_submit_and_wait(&ctx->fdmon_io_uring, wait_nr);
290+
} while (ret == -EINTR);
291+
286292
assert(ret >= 0);
287293

288294
return process_cq_ring(ctx, ready_list);

0 commit comments

Comments
 (0)