[PATCH 1/2] lockable: use QLNULL for a null lockable
From: Joe Slater
Subject: [PATCH 1/2] lockable: use QLNULL for a null lockable
Date: Wed, 3 Jun 2020 15:49:02 -0700
Use QLNULL, a null pointer of type QemuLockable *, instead of a bare
NULL. This allows QEMU to build with -Og and other optimization levels
that do not eliminate dead code.
Signed-off-by: Joe Slater <joe.slater@windriver.com>
(Note: to be squashed with the following patch.)
---
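Note for reviewers: qemu_co_queue_wait() and qemu_co_enter_next() are
macros that pass their lock argument through QEMU_MAKE_LOCKABLE(), which
dispatches on the argument's pointer type and, for any unrecognized
type, expands a fallback that references the deliberately undefined
unknown_lock_type() (see include/qemu/lockable.h). A bare NULL has type
void *, so it takes the fallback path, and the build links only if the
optimizer deletes that dead reference, which -Og does not. The sketch
below is not QEMU code: it is a minimal standalone reproduction of the
pattern using C11 _Generic in place of QEMU_GENERIC, with made-up names
(MAKE_LOCKABLE, make_lockable, co_queue_wait) standing in for the real
macros and functions.

    #include <stddef.h>

    typedef void LockFunc(void *);
    typedef struct QemuLockable { void *object; LockFunc *lock; } QemuLockable;
    typedef struct CoMutex CoMutex;

    void co_mutex_lock(void *m);
    void unknown_lock_type(void *x);      /* declared, intentionally never defined */
    void co_queue_wait(QemuLockable *l);  /* stand-in for the real _impl function */

    static inline __attribute__((always_inline)) QemuLockable *
    make_lockable(void *x, QemuLockable *l)
    {
        return x ? l : NULL;  /* folds to NULL when x is a compile-time NULL */
    }

    /* Dispatch on the static type of x; anything unrecognized (including
     * void *, which is what a bare NULL is) references unknown_lock_type. */
    #define MAKE_LOCKABLE(x)                                          \
        _Generic((x),                                                 \
                 QemuLockable *: (QemuLockable *)(x),                 \
                 default: make_lockable((x), &(QemuLockable) {        \
                     .object = (x),                                   \
                     .lock = _Generic((x),                            \
                                      CoMutex *: co_mutex_lock,       \
                                      default: unknown_lock_type)     \
                 }))

    void links_at_any_level(void) { co_queue_wait(MAKE_LOCKABLE((QemuLockable *)0)); }
    void needs_dce_to_link(void)  { co_queue_wait(MAKE_LOCKABLE(NULL)); }

    /* At -O2 the unused compound literal in needs_dce_to_link() is deleted,
     * taking the unknown_lock_type() reference with it, so the link
     * succeeds.  At -Og/-O0 the initialization survives and the link fails
     * with an undefined reference.  Passing a typed null, as QLNULL does,
     * never expands the fallback at all. */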
block/block-backend.c          | 4 ++--
block/block-copy.c             | 2 +-
block/mirror.c                 | 5 +++--
fsdev/qemu-fsdev-throttle.c    | 6 +++---
hw/9pfs/9p.c                   | 2 +-
include/qemu/lockable.h        | 3 +++
util/qemu-co-shared-resource.c | 2 +-
7 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/block/block-backend.c b/block/block-backend.c
index 6936b25..92128e8 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -1174,7 +1174,7 @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
if (blk->quiesce_counter && !blk->disable_request_queuing) {
blk_dec_in_flight(blk);
- qemu_co_queue_wait(&blk->queued_requests, NULL);
+ qemu_co_queue_wait(&blk->queued_requests, QLNULL);
blk_inc_in_flight(blk);
}
}
@@ -2367,7 +2367,7 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
if (blk->dev_ops && blk->dev_ops->drained_end) {
blk->dev_ops->drained_end(blk->dev_opaque);
}
- while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
+ while (qemu_co_enter_next(&blk->queued_requests, QLNULL)) {
/* Resume all queued requests */
}
}
diff --git a/block/block-copy.c b/block/block-copy.c
index bb8d056..8de0b54 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -120,7 +120,7 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
return false;
}
- qemu_co_queue_wait(&task->wait_queue, NULL);
+ qemu_co_queue_wait(&task->wait_queue, QLNULL);
return true;
}
diff --git a/block/mirror.c b/block/mirror.c
index e8e8844..76c082d2 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -28,6 +28,7 @@
#define MAX_IO_BYTES (1 << 20) /* 1 Mb */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
+
/* The mirroring buffer is a list of granularity-sized chunks.
* Free chunks are organized in a list.
*/
@@ -157,7 +158,7 @@ static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
if (ranges_overlap(self_start_chunk, self_nb_chunks,
op_start_chunk, op_nb_chunks))
{
- qemu_co_queue_wait(&op->waiting_requests, NULL);
+ qemu_co_queue_wait(&op->waiting_requests, QLNULL);
break;
}
}
@@ -297,7 +298,7 @@ mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
if (!op->is_pseudo_op && op->is_in_flight &&
op->is_active_write == active)
{
- qemu_co_queue_wait(&op->waiting_requests, NULL);
+ qemu_co_queue_wait(&op->waiting_requests, QLNULL);
return;
}
}
diff --git a/fsdev/qemu-fsdev-throttle.c b/fsdev/qemu-fsdev-throttle.c
index 5c83a1c..78d256d 100644
--- a/fsdev/qemu-fsdev-throttle.c
+++ b/fsdev/qemu-fsdev-throttle.c
@@ -22,13 +22,13 @@
static void fsdev_throttle_read_timer_cb(void *opaque)
{
FsThrottle *fst = opaque;
- qemu_co_enter_next(&fst->throttled_reqs[false], NULL);
+ qemu_co_enter_next(&fst->throttled_reqs[false], QLNULL);
}
static void fsdev_throttle_write_timer_cb(void *opaque)
{
FsThrottle *fst = opaque;
- qemu_co_enter_next(&fst->throttled_reqs[true], NULL);
+ qemu_co_enter_next(&fst->throttled_reqs[true], QLNULL);
}
int fsdev_throttle_parse_opts(QemuOpts *opts, FsThrottle *fst, Error **errp)
@@ -100,7 +100,7 @@ void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst, bool is_write,
if (throttle_enabled(&fst->cfg)) {
if (throttle_schedule_timer(&fst->ts, &fst->tt, is_write) ||
!qemu_co_queue_empty(&fst->throttled_reqs[is_write])) {
- qemu_co_queue_wait(&fst->throttled_reqs[is_write], NULL);
+ qemu_co_queue_wait(&fst->throttled_reqs[is_write], QLNULL);
}
throttle_account(&fst->ts, is_write, iov_size(iov, iovcnt));
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index 45a788f..35976e2 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -2888,7 +2888,7 @@ static void coroutine_fn v9fs_flush(void *opaque)
/*
* Wait for pdu to complete.
*/
- qemu_co_queue_wait(&cancel_pdu->complete, NULL);
+ qemu_co_queue_wait(&cancel_pdu->complete, QLNULL);
if (!qemu_co_queue_next(&cancel_pdu->complete)) {
cancel_pdu->cancelled = 0;
pdu_free(cancel_pdu);
diff --git a/include/qemu/lockable.h b/include/qemu/lockable.h
index b620023..7f7aebb 100644
--- a/include/qemu/lockable.h
+++ b/include/qemu/lockable.h
@@ -24,6 +24,9 @@ struct QemuLockable {
QemuLockUnlockFunc *unlock;
};
+#define QLNULL ((QemuLockable *)0)
+
+
/* This function gives an error if an invalid, non-NULL pointer type is passed
* to QEMU_MAKE_LOCKABLE. For optimized builds, we can rely on dead-code elimination
* from the compiler, and give the errors already at link time.
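With QLNULL the argument already has type QemuLockable *, so
QEMU_MAKE_LOCKABLE() takes its pass-through branch and the fallback that
references unknown_lock_type() is never expanded. An illustrative, not
authoritative, view of the call-site difference, assuming the
qemu_co_queue_wait() wrapper macro from include/qemu/coroutine.h:

    CoQueue q;   /* hypothetical variables, for illustration only */
    CoMutex m;

    qemu_co_queue_wait(&q, &m);     /* default branch: &m gets wrapped in a
                                       temporary QemuLockable */
    qemu_co_queue_wait(&q, QLNULL); /* QemuLockable * branch: the typed null
                                       is passed through unchanged */
    /* qemu_co_queue_wait(&q, NULL) would expand the unknown_lock_type()
     * fallback, which links only when dead-code elimination removes it. */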
diff --git a/util/qemu-co-shared-resource.c b/util/qemu-co-shared-resource.c
index 1c83cd9..7423ea4 100644
--- a/util/qemu-co-shared-resource.c
+++ b/util/qemu-co-shared-resource.c
@@ -64,7 +64,7 @@ void coroutine_fn co_get_from_shres(SharedResource *s, uint64_t n)
{
assert(n <= s->total);
while (!co_try_get_from_shres(s, n)) {
- qemu_co_queue_wait(&s->queue, NULL);
+ qemu_co_queue_wait(&s->queue, QLNULL);
}
}
--
2.7.4