qemu-devel

Re: [PATCH 2/9] block: Replaced qemu_mutex_lock calls with QEMU_LOCK_GUARD


From: Vladimir Sementsov-Ogievskiy
Subject: Re: [PATCH 2/9] block: Replaced qemu_mutex_lock calls with QEMU_LOCK_GUARD
Date: Fri, 12 Mar 2021 13:23:48 +0300
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Thunderbird/78.8.1

11.03.2021 06:15, Mahmoud Mandour wrote:
Replaced various qemu_mutex_lock/qemu_mutex_unlock calls with
lock guard macros (QEMU_LOCK_GUARD() and WITH_QEMU_LOCK_GUARD()).
This slightly simplifies the code by eliminating calls to
qemu_mutex_unlock and by removing goto paths.

Signed-off-by: Mahmoud Mandour <ma.mandourr@gmail.com>
---
  block/curl.c |  13 ++--
  block/nbd.c  | 188 ++++++++++++++++++++++++---------------------------

I think it would be better to split this into two separate patches.

  2 files changed, 95 insertions(+), 106 deletions(-)

diff --git a/block/curl.c b/block/curl.c
index 4ff895df8f..56a217951a 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -832,12 +832,12 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
      uint64_t start = acb->offset;
      uint64_t end;
-    qemu_mutex_lock(&s->mutex);
+    QEMU_LOCK_GUARD(&s->mutex);
      // In case we have the requested data already (e.g. read-ahead),
      // we can just call the callback and be done.
      if (curl_find_buf(s, start, acb->bytes, acb)) {
-        goto out;
+        return;
      }
      // No cache found, so let's start a new request
@@ -852,7 +852,7 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
      if (curl_init_state(s, state) < 0) {
          curl_clean_state(state);
          acb->ret = -EIO;
-        goto out;
+        return;
      }
      acb->start = 0;
@@ -867,7 +867,7 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
      if (state->buf_len && state->orig_buf == NULL) {
          curl_clean_state(state);
          acb->ret = -ENOMEM;
-        goto out;
+        return;
      }
      state->acb[0] = acb;
@@ -880,14 +880,11 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
          acb->ret = -EIO;
      curl_clean_state(state);
-        goto out;
+        return;
      }
      /* Tell curl it needs to kick things off */
      curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
-
-out:
-    qemu_mutex_unlock(&s->mutex);
  }

This change is obvious and good.

static int coroutine_fn curl_co_preadv(BlockDriverState *bs,
diff --git a/block/nbd.c b/block/nbd.c
index c26dc5a54f..28ba7aad61 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -407,27 +407,25 @@ static void *connect_thread_func(void *opaque)
          thr->sioc = NULL;
      }
-    qemu_mutex_lock(&thr->mutex);
-
-    switch (thr->state) {
-    case CONNECT_THREAD_RUNNING:
-        thr->state = ret < 0 ? CONNECT_THREAD_FAIL : CONNECT_THREAD_SUCCESS;
-        if (thr->bh_ctx) {
-            aio_bh_schedule_oneshot(thr->bh_ctx, thr->bh_func, thr->bh_opaque);
-
-            /* play safe, don't reuse bh_ctx on further connection attempts */
-            thr->bh_ctx = NULL;
+    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
+        switch (thr->state) {
+        case CONNECT_THREAD_RUNNING:
+            thr->state = ret < 0 ? CONNECT_THREAD_FAIL : CONNECT_THREAD_SUCCESS;
+            if (thr->bh_ctx) {
+                aio_bh_schedule_oneshot(thr->bh_ctx, thr->bh_func, thr->bh_opaque);

over-80 line

+
+                /* play safe, don't reuse bh_ctx on further connection attempts */

and here

+                thr->bh_ctx = NULL;
+            }
+            break;
+        case CONNECT_THREAD_RUNNING_DETACHED:
+            do_free = true;
+            break;
+        default:
+            abort();
          }
-        break;
-    case CONNECT_THREAD_RUNNING_DETACHED:
-        do_free = true;
-        break;
-    default:
-        abort();
      }
-    qemu_mutex_unlock(&thr->mutex);
-
      if (do_free) {
          nbd_free_connect_thread(thr);
      }
@@ -443,36 +441,33 @@ nbd_co_establish_connection(BlockDriverState *bs, Error **errp)
      BDRVNBDState *s = bs->opaque;
      NBDConnectThread *thr = s->connect_thread;
-    qemu_mutex_lock(&thr->mutex);
-
-    switch (thr->state) {
-    case CONNECT_THREAD_FAIL:
-    case CONNECT_THREAD_NONE:
-        error_free(thr->err);
-        thr->err = NULL;
-        thr->state = CONNECT_THREAD_RUNNING;
-        qemu_thread_create(&thread, "nbd-connect",
-                           connect_thread_func, thr, QEMU_THREAD_DETACHED);
-        break;
-    case CONNECT_THREAD_SUCCESS:
-        /* Previous attempt finally succeeded in background */
-        thr->state = CONNECT_THREAD_NONE;
-        s->sioc = thr->sioc;
-        thr->sioc = NULL;
-        yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
-                               nbd_yank, bs);
-        qemu_mutex_unlock(&thr->mutex);
-        return 0;
-    case CONNECT_THREAD_RUNNING:
-        /* Already running, will wait */
-        break;
-    default:
-        abort();
-    }
-
-    thr->bh_ctx = qemu_get_current_aio_context();
+    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
+        switch (thr->state) {
+        case CONNECT_THREAD_FAIL:
+        case CONNECT_THREAD_NONE:
+            error_free(thr->err);
+            thr->err = NULL;
+            thr->state = CONNECT_THREAD_RUNNING;
+            qemu_thread_create(&thread, "nbd-connect",
+                               connect_thread_func, thr, QEMU_THREAD_DETACHED);
+            break;
+        case CONNECT_THREAD_SUCCESS:
+            /* Previous attempt finally succeeded in background */
+            thr->state = CONNECT_THREAD_NONE;
+            s->sioc = thr->sioc;
+            thr->sioc = NULL;
+            yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
+                                   nbd_yank, bs);
+            return 0;
+        case CONNECT_THREAD_RUNNING:
+            /* Already running, will wait */
+            break;
+        default:
+            abort();
+        }
-    qemu_mutex_unlock(&thr->mutex);
+        thr->bh_ctx = qemu_get_current_aio_context();
+    }
/*
@@ -486,46 +481,45 @@ nbd_co_establish_connection(BlockDriverState *bs, Error **errp)
      s->wait_connect = true;
      qemu_coroutine_yield();
-    qemu_mutex_lock(&thr->mutex);
-
-    switch (thr->state) {
-    case CONNECT_THREAD_SUCCESS:
-    case CONNECT_THREAD_FAIL:
-        thr->state = CONNECT_THREAD_NONE;
-        error_propagate(errp, thr->err);
-        thr->err = NULL;
-        s->sioc = thr->sioc;
-        thr->sioc = NULL;
-        if (s->sioc) {
-            yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
-                                   nbd_yank, bs);
-        }
-        ret = (s->sioc ? 0 : -1);
-        break;
-    case CONNECT_THREAD_RUNNING:
-    case CONNECT_THREAD_RUNNING_DETACHED:
-        /*
-         * Obviously, drained section wants to start. Report the attempt as
-         * failed. Still connect thread is executing in background, and its
-         * result may be used for next connection attempt.
-         */
-        ret = -1;
-        error_setg(errp, "Connection attempt cancelled by other operation");
-        break;
+    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
+        switch (thr->state) {
+        case CONNECT_THREAD_SUCCESS:
+        case CONNECT_THREAD_FAIL:
+            thr->state = CONNECT_THREAD_NONE;
+            error_propagate(errp, thr->err);
+            thr->err = NULL;
+            s->sioc = thr->sioc;
+            thr->sioc = NULL;
+            if (s->sioc) {
+                yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
+                                       nbd_yank, bs);
+            }
+            ret = (s->sioc ? 0 : -1);
+            break;
+        case CONNECT_THREAD_RUNNING:
+        case CONNECT_THREAD_RUNNING_DETACHED:
+            /*
+             * Obviously, drained section wants to start. Report the attempt as
+             * failed. Still connect thread is executing in background, and its
+             * result may be used for next connection attempt.
+             */
+            ret = -1;
+            error_setg(errp, "Connection attempt cancelled by other operation");
+            break;
-    case CONNECT_THREAD_NONE:
-        /*
-         * Impossible. We've seen this thread running. So it should be
-         * running or at least give some results.
-         */
-        abort();
+        case CONNECT_THREAD_NONE:
+            /*
+             * Impossible. We've seen this thread running. So it should be
+             * running or at least give some results.
+             */
+            abort();
-    default:
-        abort();
+        default:
+            abort();
+        }
      }
-    qemu_mutex_unlock(&thr->mutex);
-
      return ret;
  }
@@ -546,25 +540,23 @@ static void nbd_co_establish_connection_cancel(BlockDriverState *bs,
      bool wake = false;
      bool do_free = false;
-    qemu_mutex_lock(&thr->mutex);
-
-    if (thr->state == CONNECT_THREAD_RUNNING) {
-        /* We can cancel only in running state, when bh is not yet scheduled */
-        thr->bh_ctx = NULL;
-        if (s->wait_connect) {
-            s->wait_connect = false;
-            wake = true;
-        }
-        if (detach) {
-            thr->state = CONNECT_THREAD_RUNNING_DETACHED;
-            s->connect_thread = NULL;
+    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
+        if (thr->state == CONNECT_THREAD_RUNNING) {
+            /* We can cancel only in running state, when bh is not yet scheduled */

over-80 line

+            thr->bh_ctx = NULL;
+            if (s->wait_connect) {
+                s->wait_connect = false;
+                wake = true;
+            }
+            if (detach) {
+                thr->state = CONNECT_THREAD_RUNNING_DETACHED;
+                s->connect_thread = NULL;
+            }
+        } else if (detach) {
+            do_free = true;
          }
-    } else if (detach) {
-        do_free = true;
      }
-    qemu_mutex_unlock(&thr->mutex);
-
      if (do_free) {
          nbd_free_connect_thread(thr);
          s->connect_thread = NULL;



For nbd.c we mostly change simple critical sections of the form

qemu_mutex_lock()
...
qemu_mutex_unlock()

into

WITH_QEMU_LOCK_GUARD() {
...
}
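
As an aside, why the guard form is safer than unlocking by hand can be shown
with a small sketch. This is only an illustration built on the compiler's
cleanup attribute; EXAMPLE_LOCK_GUARD and example_auto_unlock are made-up names
for the example, not QEMU's actual lockable.h code:

#include <pthread.h>
#include <stdbool.h>

/* Called automatically when the guard variable goes out of scope. */
static void example_auto_unlock(pthread_mutex_t **m)
{
    pthread_mutex_unlock(*m);
}

/*
 * Hypothetical macro in the spirit of QEMU_LOCK_GUARD(): lock now and
 * unlock on every path that leaves the enclosing scope.
 */
#define EXAMPLE_LOCK_GUARD(m) \
    pthread_mutex_t *_example_guard \
        __attribute__((cleanup(example_auto_unlock))) = \
        (pthread_mutex_lock(m), (m))

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_value;

static int get_value_or_fail(bool ok)
{
    EXAMPLE_LOCK_GUARD(&lock);

    if (!ok) {
        return -1;        /* no goto/unlock pair needed: unlocked here... */
    }
    return shared_value;  /* ...and here */
}

As far as I understand, the real QEMU_LOCK_GUARD()/WITH_QEMU_LOCK_GUARD()
macros rely on the same scope-based cleanup idea, which is why the early
returns in the curl.c hunk above can't leak the mutex.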

And we don't eliminate any gotos... Hmm. At first sight, the increased nesting
doesn't make the code more beautiful. But I understand that the context-manager
concept is safer than calling unlock() by hand, and with the nested block the
critical section becomes more obvious. So, with the over-80 lines fixed:

Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

--
Best regards,
Vladimir


