From: Zhijian Li (Fujitsu)
Subject: Re: [PATCH 28/52] migration/rdma: Check negative error values the same way everywhere
Date: Mon, 25 Sep 2023 06:26:42 +0000
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Thunderbird/102.11.0


On 18/09/2023 22:41, Markus Armbruster wrote:
> When a function returns 0 on success and a negative value on error,
> checking for non-zero suffices, but checking for negative is clearer.
> So do that.
> 
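For reference, a minimal self-contained sketch of the two checking styles the patch contrasts. do_work() is a hypothetical stand-in for the rdma_*()/qemu_rdma_*() calls touched below, assumed to return 0 on success and a negative value on error:

#include <stdio.h>

/* Hypothetical stand-in for the patched calls: returns 0 on success,
 * a negative value on error. */
static int do_work(int fail)
{
    return fail ? -1 : 0;
}

int main(void)
{
    int ret = do_work(1);

    if (ret) {          /* old style: any non-zero value means error */
        fprintf(stderr, "failed (non-zero check)\n");
    }
    if (ret < 0) {      /* new style: the check spells out the contract */
        fprintf(stderr, "failed (negative check)\n");
    }
    return 0;
}

For a 0-or-negative return contract the two checks behave identically; the difference is purely one of readability.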

The convention this patch adopts is not my favorite.

@Peter, Juan

I'd like to hear your opinions.

Thanks
Zhijian


> Signed-off-by: Markus Armbruster <armbru@redhat.com>
> ---
>   migration/rdma.c | 82 ++++++++++++++++++++++++------------------------
>   1 file changed, 41 insertions(+), 41 deletions(-)
> 
> diff --git a/migration/rdma.c b/migration/rdma.c
> index 62d95b7d2c..6c643a1b30 100644
> --- a/migration/rdma.c
> +++ b/migration/rdma.c
> @@ -947,7 +947,7 @@ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
>   
>       /* create CM id */
>       ret = rdma_create_id(rdma->channel, &rdma->cm_id, NULL, RDMA_PS_TCP);
> -    if (ret) {
> +    if (ret < 0) {
>           ERROR(errp, "could not create channel id");
>           goto err_resolve_create_id;
>       }
> @@ -968,10 +968,10 @@ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
>   
>           ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
>                   RDMA_RESOLVE_TIMEOUT_MS);
> -        if (!ret) {
> +        if (ret >= 0) {
>               if (e->ai_family == AF_INET6) {
>                 ret = qemu_rdma_broken_ipv6_kernel(rdma->cm_id->verbs, errp);
> -                if (ret) {
> +                if (ret < 0) {
>                       continue;
>                   }
>               }
> @@ -988,7 +988,7 @@ route:
>       qemu_rdma_dump_gid("source_resolve_addr", rdma->cm_id);
>   
>       ret = rdma_get_cm_event(rdma->channel, &cm_event);
> -    if (ret) {
> +    if (ret < 0) {
>           ERROR(errp, "could not perform event_addr_resolved");
>           goto err_resolve_get_addr;
>       }
> @@ -1004,13 +1004,13 @@ route:
>   
>       /* resolve route */
>       ret = rdma_resolve_route(rdma->cm_id, RDMA_RESOLVE_TIMEOUT_MS);
> -    if (ret) {
> +    if (ret < 0) {
>           ERROR(errp, "could not resolve rdma route");
>           goto err_resolve_get_addr;
>       }
>   
>       ret = rdma_get_cm_event(rdma->channel, &cm_event);
> -    if (ret) {
> +    if (ret < 0) {
>           ERROR(errp, "could not perform event_route_resolved");
>           goto err_resolve_get_addr;
>       }
> @@ -1118,7 +1118,7 @@ static int qemu_rdma_alloc_qp(RDMAContext *rdma)
>       attr.qp_type = IBV_QPT_RC;
>   
>       ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr);
> -    if (ret) {
> +    if (ret < 0) {
>           return -1;
>       }
>   
> @@ -1551,7 +1551,7 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma,
>   
>                   if (pfds[1].revents) {
>                       ret = rdma_get_cm_event(rdma->channel, &cm_event);
> -                    if (ret) {
> +                    if (ret < 0) {
>                           error_report("failed to get cm event while wait "
>                                        "completion channel");
>                           return -1;
> @@ -1652,12 +1652,12 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma,
>   
>       while (1) {
>           ret = qemu_rdma_wait_comp_channel(rdma, ch);
> -        if (ret) {
> +        if (ret < 0) {
>               goto err_block_for_wrid;
>           }
>   
>           ret = ibv_get_cq_event(ch, &cq, &cq_ctx);
> -        if (ret) {
> +        if (ret < 0) {
>               perror("ibv_get_cq_event");
>               goto err_block_for_wrid;
>           }
> @@ -1888,7 +1888,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
>        */
>       if (resp) {
>           ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA);
> -        if (ret) {
> +        if (ret < 0) {
>               error_report("rdma migration: error posting"
>                       " extra control recv for anticipated result!");
>               return -1;
> @@ -1899,7 +1899,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
>        * Post a WR to replace the one we just consumed for the READY message.
>        */
>       ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
> -    if (ret) {
> +    if (ret < 0) {
>           error_report("rdma migration: error posting first control recv!");
>           return -1;
>       }
> @@ -1986,7 +1986,7 @@ static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
>        * Post a new RECV work request to replace the one we just consumed.
>        */
>       ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
> -    if (ret) {
> +    if (ret < 0) {
>           error_report("rdma migration: error posting second control recv!");
>           return -1;
>       }
> @@ -2311,7 +2311,7 @@ static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
>       /* If we cannot merge it, we flush the current buffer first. */
>       if (!qemu_rdma_buffer_mergable(rdma, current_addr, len)) {
>           ret = qemu_rdma_write_flush(f, rdma);
> -        if (ret) {
> +        if (ret < 0) {
>               return -1;
>           }
>           rdma->current_length = 0;
> @@ -2441,12 +2441,12 @@ static int qemu_rdma_source_init(RDMAContext *rdma, bool pin_all, Error **errp)
>       rdma->pin_all = pin_all;
>   
>       ret = qemu_rdma_resolve_host(rdma, errp);
> -    if (ret) {
> +    if (ret < 0) {
>           goto err_rdma_source_init;
>       }
>   
>       ret = qemu_rdma_alloc_pd_cq(rdma);
> -    if (ret) {
> +    if (ret < 0) {
>           ERROR(errp, "rdma migration: error allocating pd and cq! Your mlock()"
>                       " limits may be too low. Please check $ ulimit -a # and "
>                       "search for 'ulimit -l' in the output");
> @@ -2454,7 +2454,7 @@ static int qemu_rdma_source_init(RDMAContext *rdma, bool pin_all, Error **errp)
>       }
>   
>       ret = qemu_rdma_alloc_qp(rdma);
> -    if (ret) {
> +    if (ret < 0) {
>           ERROR(errp, "rdma migration: error allocating qp!");
>           goto err_rdma_source_init;
>       }
> @@ -2471,7 +2471,7 @@ static int qemu_rdma_source_init(RDMAContext *rdma, bool pin_all, Error **errp)
>   
>       for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
>           ret = qemu_rdma_reg_control(rdma, idx);
> -        if (ret) {
> +        if (ret < 0) {
>               ERROR(errp, "rdma migration: error registering %d control!",
>                                                               idx);
>               goto err_rdma_source_init;
> @@ -2545,13 +2545,13 @@ static int qemu_rdma_connect(RDMAContext *rdma, bool return_path,
>       caps_to_network(&cap);
>   
>       ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
> -    if (ret) {
> +    if (ret < 0) {
>           ERROR(errp, "posting second control recv");
>           goto err_rdma_source_connect;
>       }
>   
>       ret = rdma_connect(rdma->cm_id, &conn_param);
> -    if (ret) {
> +    if (ret < 0) {
>           perror("rdma_connect");
>           ERROR(errp, "connecting to destination!");
>           goto err_rdma_source_connect;
> @@ -2565,7 +2565,7 @@ static int qemu_rdma_connect(RDMAContext *rdma, bool return_path,
>               ERROR(errp, "failed to get cm event");
>           }
>       }
> -    if (ret) {
> +    if (ret < 0) {
>           perror("rdma_get_cm_event after rdma_connect");
>           goto err_rdma_source_connect;
>       }
> @@ -2633,7 +2633,7 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
>   
>       /* create CM id */
>       ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP);
> -    if (ret) {
> +    if (ret < 0) {
>           ERROR(errp, "could not create cm_id!");
>           goto err_dest_init_create_listen_id;
>       }
> @@ -2649,7 +2649,7 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
>   
>       ret = rdma_set_option(listen_id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
>                             &reuse, sizeof reuse);
> -    if (ret) {
> +    if (ret < 0) {
>           ERROR(errp, "Error: could not set REUSEADDR option");
>           goto err_dest_init_bind_addr;
>       }
> @@ -2658,12 +2658,12 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
>               &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
>           trace_qemu_rdma_dest_init_trying(rdma->host, ip);
>           ret = rdma_bind_addr(listen_id, e->ai_dst_addr);
> -        if (ret) {
> +        if (ret < 0) {
>               continue;
>           }
>           if (e->ai_family == AF_INET6) {
>               ret = qemu_rdma_broken_ipv6_kernel(listen_id->verbs, errp);
> -            if (ret) {
> +            if (ret < 0) {
>                   continue;
>               }
>           }
> @@ -3303,7 +3303,7 @@ static void rdma_cm_poll_handler(void *opaque)
>       MigrationIncomingState *mis = migration_incoming_get_current();
>   
>       ret = rdma_get_cm_event(rdma->channel, &cm_event);
> -    if (ret) {
> +    if (ret < 0) {
>           error_report("get_cm_event failed %d", errno);
>           return;
>       }
> @@ -3343,7 +3343,7 @@ static int qemu_rdma_accept(RDMAContext *rdma)
>       int idx;
>   
>       ret = rdma_get_cm_event(rdma->channel, &cm_event);
> -    if (ret) {
> +    if (ret < 0) {
>           goto err_rdma_dest_wait;
>       }
>   
> @@ -3413,13 +3413,13 @@ static int qemu_rdma_accept(RDMAContext *rdma)
>       qemu_rdma_dump_id("dest_init", verbs);
>   
>       ret = qemu_rdma_alloc_pd_cq(rdma);
> -    if (ret) {
> +    if (ret < 0) {
>           error_report("rdma migration: error allocating pd and cq!");
>           goto err_rdma_dest_wait;
>       }
>   
>       ret = qemu_rdma_alloc_qp(rdma);
> -    if (ret) {
> +    if (ret < 0) {
>           error_report("rdma migration: error allocating qp!");
>           goto err_rdma_dest_wait;
>       }
> @@ -3428,7 +3428,7 @@ static int qemu_rdma_accept(RDMAContext *rdma)
>   
>       for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
>           ret = qemu_rdma_reg_control(rdma, idx);
> -        if (ret) {
> +        if (ret < 0) {
>               error_report("rdma: error registering %d control", idx);
>               goto err_rdma_dest_wait;
>           }
> @@ -3446,13 +3446,13 @@ static int qemu_rdma_accept(RDMAContext *rdma)
>       }
>   
>       ret = rdma_accept(rdma->cm_id, &conn_param);
> -    if (ret) {
> +    if (ret < 0) {
>           error_report("rdma_accept failed");
>           goto err_rdma_dest_wait;
>       }
>   
>       ret = rdma_get_cm_event(rdma->channel, &cm_event);
> -    if (ret) {
> +    if (ret < 0) {
>           error_report("rdma_accept get_cm_event failed");
>           goto err_rdma_dest_wait;
>       }
> @@ -3467,7 +3467,7 @@ static int qemu_rdma_accept(RDMAContext *rdma)
>       rdma->connected = true;
>   
>       ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
> -    if (ret) {
> +    if (ret < 0) {
>           error_report("rdma migration: error posting second control recv");
>           goto err_rdma_dest_wait;
>       }
> @@ -3596,7 +3596,7 @@ static int qemu_rdma_registration_handle(QEMUFile *f)
>   
>               if (rdma->pin_all) {
>                   ret = qemu_rdma_reg_whole_ram_blocks(rdma);
> -                if (ret) {
> +                if (ret < 0) {
>                       error_report("rdma migration: error dest "
>                                       "registering ram blocks");
>                       goto err;
> @@ -4057,7 +4057,7 @@ static void rdma_accept_incoming_migration(void *opaque)
>       trace_qemu_rdma_accept_incoming_migration();
>       ret = qemu_rdma_accept(rdma);
>   
> -    if (ret) {
> +    if (ret < 0) {
>           fprintf(stderr, "RDMA ERROR: Migration initialization failed\n");
>           return;
>       }
> @@ -4101,7 +4101,7 @@ void rdma_start_incoming_migration(const char *host_port, Error **errp)
>       }
>   
>       ret = qemu_rdma_dest_init(rdma, errp);
> -    if (ret) {
> +    if (ret < 0) {
>           goto err;
>       }
>   
> @@ -4109,7 +4109,7 @@ void rdma_start_incoming_migration(const char *host_port, Error **errp)
>   
>       ret = rdma_listen(rdma->listen_id, 5);
>   
> -    if (ret) {
> +    if (ret < 0) {
>           ERROR(errp, "listening on socket!");
>           goto cleanup_rdma;
>       }
> @@ -4151,14 +4151,14 @@ void rdma_start_outgoing_migration(void *opaque,
>   
>       ret = qemu_rdma_source_init(rdma, migrate_rdma_pin_all(), errp);
>   
> -    if (ret) {
> +    if (ret < 0) {
>           goto err;
>       }
>   
>       trace_rdma_start_outgoing_migration_after_rdma_source_init();
>       ret = qemu_rdma_connect(rdma, false, errp);
>   
> -    if (ret) {
> +    if (ret < 0) {
>           goto err;
>       }
>   
> @@ -4173,13 +4173,13 @@ void rdma_start_outgoing_migration(void *opaque,
>           ret = qemu_rdma_source_init(rdma_return_path,
>                                       migrate_rdma_pin_all(), errp);
>   
> -        if (ret) {
> +        if (ret < 0) {
>               goto return_path_err;
>           }
>   
>           ret = qemu_rdma_connect(rdma_return_path, true, errp);
>   
> -        if (ret) {
> +        if (ret < 0) {
>               goto return_path_err;
>           }
>   
