qemu/atomic.h: rename atomic_ to qatomic_
clang's C11 atomic_fetch_*() functions only take a C11 atomic type
pointer argument. QEMU uses direct types (int, etc) and this causes a
compiler error when QEMU code calls these functions in a source file
that also includes <stdatomic.h> via a system header file:

  $ CC=clang CXX=clang++ ./configure ... && make
  ../util/async.c:79:17: error: address argument to atomic operation must
  be a pointer to _Atomic type ('unsigned int *' invalid)

Avoid using atomic_*() names in QEMU's atomic.h since that namespace is
used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h
and <stdatomic.h> can co-exist. I checked /usr/include on my machine and
searched GitHub for existing "qatomic_" users but there seem to be none.

This patch was generated using:

  $ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \
    sort -u >/tmp/changed_identifiers
  $ for identifier in $(</tmp/changed_identifiers); do
        sed -i "s%\<$identifier\>%q$identifier%g" \
            $(git grep -I -l "\<$identifier\>")
    done

I manually fixed line-wrap issues and misaligned rST tables.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200923105646.47864-1-stefanha@redhat.com>
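As a concrete illustration of the clash (this repro is not part of the
patch; the file name and the SHOW_ERROR guard are made up for the
example), the <stdatomic.h> generic functions reject plain pointers,
while the GCC/clang __atomic builtins that QEMU's macros are layered on
accept them:

    /* repro.c, illustrative only, not part of the patch */
    #include <stdatomic.h>

    int main(void)
    {
        unsigned int flags = 0;     /* plain type, as QEMU uses */

    #ifdef SHOW_ERROR
        /* clang rejects this: "address argument to atomic operation must
         * be a pointer to _Atomic type ('unsigned int *' invalid)",
         * because <stdatomic.h>'s atomic_fetch_or() requires an _Atomic
         * pointer argument */
        atomic_fetch_or(&flags, 1u);
    #endif

        /* the GCC/clang __atomic builtins take plain pointers, which is
         * why a separate qatomic_*() namespace can coexist with
         * <stdatomic.h> */
        __atomic_fetch_or(&flags, 1u, __ATOMIC_SEQ_CST);
        return (int)flags;
    }

Compiling with "clang -DSHOW_ERROR repro.c" reproduces the diagnostic
quoted above; without the define the file builds cleanly.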
util/async.c (28 lines changed)
--- a/util/async.c
+++ b/util/async.c
@@ -70,13 +70,13 @@ static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
     unsigned old_flags;
 
     /*
-     * The memory barrier implicit in atomic_fetch_or makes sure that:
+     * The memory barrier implicit in qatomic_fetch_or makes sure that:
      * 1. idle & any writes needed by the callback are done before the
      *    locations are read in the aio_bh_poll.
      * 2. ctx is loaded before the callback has a chance to execute and bh
      *    could be freed.
      */
-    old_flags = atomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
+    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
     if (!(old_flags & BH_PENDING)) {
         QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
     }
@@ -96,13 +96,13 @@ static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
     QSLIST_REMOVE_HEAD(head, next);
 
     /*
-     * The atomic_and is paired with aio_bh_enqueue(). The implicit memory
+     * The qatomic_and is paired with aio_bh_enqueue(). The implicit memory
      * barrier ensures that the callback sees all writes done by the scheduling
      * thread. It also ensures that the scheduling thread sees the cleared
      * flag before bh->cb has run, and thus will call aio_notify again if
      * necessary.
      */
-    *flags = atomic_fetch_and(&bh->flags,
-                              ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
+    *flags = qatomic_fetch_and(&bh->flags,
+                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
     return bh;
 }
@@ -185,7 +185,7 @@ void qemu_bh_schedule(QEMUBH *bh)
  */
 void qemu_bh_cancel(QEMUBH *bh)
 {
-    atomic_and(&bh->flags, ~BH_SCHEDULED);
+    qatomic_and(&bh->flags, ~BH_SCHEDULED);
 }
 
 /* This func is async.The bottom half will do the delete action at the finial
@@ -249,7 +249,7 @@ aio_ctx_prepare(GSource *source, gint *timeout)
 {
     AioContext *ctx = (AioContext *) source;
 
-    atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) | 1);
+    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);
 
     /*
      * Write ctx->notify_me before computing the timeout
@@ -276,7 +276,7 @@ aio_ctx_check(GSource *source)
     BHListSlice *s;
 
     /* Finish computing the timeout before clearing the flag. */
-    atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) & ~1);
+    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
     aio_notify_accept(ctx);
 
     QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
@@ -424,21 +424,21 @@ void aio_notify(AioContext *ctx)
      * aio_notify_accept.
      */
     smp_wmb();
-    atomic_set(&ctx->notified, true);
+    qatomic_set(&ctx->notified, true);
 
     /*
      * Write ctx->notified before reading ctx->notify_me. Pairs
      * with smp_mb in aio_ctx_prepare or aio_poll.
      */
     smp_mb();
-    if (atomic_read(&ctx->notify_me)) {
+    if (qatomic_read(&ctx->notify_me)) {
         event_notifier_set(&ctx->notifier);
     }
 }
 
 void aio_notify_accept(AioContext *ctx)
 {
-    atomic_set(&ctx->notified, false);
+    qatomic_set(&ctx->notified, false);
 
     /*
      * Write ctx->notified before reading e.g. bh->flags. Pairs with smp_wmb
@@ -465,7 +465,7 @@ static bool aio_context_notifier_poll(void *opaque)
     EventNotifier *e = opaque;
     AioContext *ctx = container_of(e, AioContext, notifier);
 
-    return atomic_read(&ctx->notified);
+    return qatomic_read(&ctx->notified);
 }
 
 static void co_schedule_bh_cb(void *opaque)
@@ -489,7 +489,7 @@ static void co_schedule_bh_cb(void *opaque)
         aio_context_acquire(ctx);
 
         /* Protected by write barrier in qemu_aio_coroutine_enter */
-        atomic_set(&co->scheduled, NULL);
+        qatomic_set(&co->scheduled, NULL);
         qemu_aio_coroutine_enter(ctx, co);
         aio_context_release(ctx);
     }
@@ -546,7 +546,7 @@ fail:
 void aio_co_schedule(AioContext *ctx, Coroutine *co)
 {
     trace_aio_co_schedule(ctx, co);
-    const char *scheduled = atomic_cmpxchg(&co->scheduled, NULL,
-                                           __func__);
+    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
+                                            __func__);
 
     if (scheduled) {
@@ -577,7 +577,7 @@ void aio_co_wake(struct Coroutine *co)
      * qemu_coroutine_enter.
      */
     smp_read_barrier_depends();
-    ctx = atomic_read(&co->ctx);
+    ctx = qatomic_read(&co->ctx);
 
     aio_co_enter(ctx, co);
 }
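For readers tracing what the renamed call sites above expand to, below
is a simplified sketch of the idea behind the qatomic_*() layer. This is
not QEMU's actual include/qemu/atomic.h (the real header adds type
checking and per-architecture barrier handling); it only shows that
macros layered over the __atomic builtins work on plain, non-_Atomic
types, which is what makes the coexistence with <stdatomic.h> possible:

    /* qatomic_sketch.c, a simplified sketch, not QEMU's real header */
    #define qatomic_read(ptr)          __atomic_load_n(ptr, __ATOMIC_RELAXED)
    #define qatomic_set(ptr, val)      __atomic_store_n(ptr, val, __ATOMIC_RELAXED)
    #define qatomic_fetch_or(ptr, val) __atomic_fetch_or(ptr, val, __ATOMIC_SEQ_CST)

    int main(void)
    {
        unsigned flags = 0;                          /* plain type */
        unsigned old = qatomic_fetch_or(&flags, 1u); /* implies a full barrier */
        qatomic_set(&flags, old | 2u);               /* relaxed store */
        return (int)qatomic_read(&flags);            /* relaxed load */
    }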