[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v7 10/12] multifd: Support for zero pages transmission
From: |
Leonardo Brás |
Subject: |
Re: [PATCH v7 10/12] multifd: Support for zero pages transmission |
Date: |
Fri, 02 Sep 2022 10:27:25 -0300 |
User-agent: |
Evolution 3.44.4 |
On Tue, 2022-08-02 at 08:39 +0200, Juan Quintela wrote:
> This patch adds counters and similar. Logic will be added on the
> following patch.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
>
> ---
>
> Added counters for duplicated/non duplicated pages.
> Removed reviewed by from David.
> Add total_zero_pages
> ---
> migration/multifd.h | 17 ++++++++++++++++-
> migration/multifd.c | 36 +++++++++++++++++++++++++++++-------
> migration/ram.c | 2 --
> migration/trace-events | 8 ++++----
> 4 files changed, 49 insertions(+), 14 deletions(-)
>
> diff --git a/migration/multifd.h b/migration/multifd.h
> index cd389d18d2..a1b852200d 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -47,7 +47,10 @@ typedef struct {
> /* size of the next packet that contains pages */
> uint32_t next_packet_size;
> uint64_t packet_num;
> - uint64_t unused[4]; /* Reserved for future use */
> + /* zero pages */
> + uint32_t zero_pages;
> + uint32_t unused32[1]; /* Reserved for future use */
> + uint64_t unused64[3]; /* Reserved for future use */
> char ramblock[256];
> uint64_t offset[];
> } __attribute__((packed)) MultiFDPacket_t;
> @@ -127,6 +130,8 @@ typedef struct {
> uint64_t num_packets;
> /* non zero pages sent through this channel */
> uint64_t total_normal_pages;
> + /* zero pages sent through this channel */
> + uint64_t total_zero_pages;
> /* buffers to send */
> struct iovec *iov;
> /* number of iovs used */
> @@ -135,6 +140,10 @@ typedef struct {
> ram_addr_t *normal;
> /* num of non zero pages */
> uint32_t normal_num;
> + /* Pages that are zero */
> + ram_addr_t *zero;
> + /* num of zero pages */
> + uint32_t zero_num;
More of an organizational viewpoint:
I can't really see total_zero_pages, zero[] and zero_num as Multifd "Parameters".
But OTOH there is other data like this in the struct for keeping migration
status, so not an issue.
> /* used for compression methods */
> void *data;
> } MultiFDSendParams;
> @@ -184,12 +193,18 @@ typedef struct {
> uint8_t *host;
> /* non zero pages recv through this channel */
> uint64_t total_normal_pages;
> + /* zero pages recv through this channel */
> + uint64_t total_zero_pages;
> /* buffers to recv */
> struct iovec *iov;
> /* Pages that are not zero */
> ram_addr_t *normal;
> /* num of non zero pages */
> uint32_t normal_num;
> + /* Pages that are zero */
> + ram_addr_t *zero;
> + /* num of zero pages */
> + uint32_t zero_num;
> /* used for de-compression methods */
> void *data;
> } MultiFDRecvParams;
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 68fc9f8e88..4473d9f834 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -263,6 +263,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
> packet->normal_pages = cpu_to_be32(p->normal_num);
> packet->next_packet_size = cpu_to_be32(p->next_packet_size);
> packet->packet_num = cpu_to_be64(p->packet_num);
> + packet->zero_pages = cpu_to_be32(p->zero_num);
>
> if (p->pages->block) {
> strncpy(packet->ramblock, p->pages->block->idstr, 256);
> @@ -323,7 +324,15 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams
> *p, Error **errp)
> p->next_packet_size = be32_to_cpu(packet->next_packet_size);
> p->packet_num = be64_to_cpu(packet->packet_num);
>
> - if (p->normal_num == 0) {
> + p->zero_num = be32_to_cpu(packet->zero_pages);
> + if (p->zero_num > packet->pages_alloc - p->normal_num) {
> + error_setg(errp, "multifd: received packet "
> + "with %u zero pages and expected maximum pages are %u",
> + p->zero_num, packet->pages_alloc - p->normal_num) ;
> + return -1;
> + }
> +
> + if (p->normal_num == 0 && p->zero_num == 0) {
> return 0;
> }
>
> @@ -432,6 +441,8 @@ static int multifd_send_pages(QEMUFile *f)
> ram_counters.multifd_bytes += p->sent_bytes;
> qemu_file_acct_rate_limit(f, p->sent_bytes);
> p->sent_bytes = 0;
> + ram_counters.normal += p->normal_num;
> + ram_counters.duplicate += p->zero_num;
> qemu_mutex_unlock(&p->mutex);
> qemu_sem_post(&p->sem);
>
> @@ -545,6 +556,8 @@ void multifd_save_cleanup(void)
> p->iov = NULL;
> g_free(p->normal);
> p->normal = NULL;
> + g_free(p->zero);
> + p->zero = NULL;
> multifd_send_state->ops->send_cleanup(p, &local_err);
> if (local_err) {
> migrate_set_error(migrate_get_current(), local_err);
> @@ -666,6 +679,7 @@ static void *multifd_send_thread(void *opaque)
> qemu_mutex_unlock(&p->mutex);
>
> p->normal_num = 0;
> + p->zero_num = 0;
>
> if (use_zero_copy_send) {
> p->iovs_num = 0;
> @@ -687,8 +701,8 @@ static void *multifd_send_thread(void *opaque)
> }
> multifd_send_fill_packet(p);
>
> - trace_multifd_send(p->id, packet_num, p->normal_num, p->flags,
> - p->next_packet_size);
> + trace_multifd_send(p->id, packet_num, p->normal_num, p->zero_num,
> + p->flags, p->next_packet_size);
>
> if (use_zero_copy_send) {
> /* Send header first, without zerocopy */
> @@ -712,6 +726,7 @@ static void *multifd_send_thread(void *opaque)
> qemu_mutex_lock(&p->mutex);
> p->num_packets++;
> p->total_normal_pages += p->normal_num;
> + p->total_zero_pages += p->zero_num;
I can see it getting declared, incremented and used. But where is it initialized
to zero? I mean, should it not have 'p->total_zero_pages = 0;' somewhere in
setup?
(I understand multifd_save_setup() allocates multifd_send_state->params with
g_new0(), but other variables are zeroed there explicitly, like p->pending_job and
p->write_flags, so why not this one?)
> p->pages->num = 0;
> p->pages->block = NULL;
> p->sent_bytes += p->packet_len;;
> @@ -753,7 +768,8 @@ out:
> qemu_mutex_unlock(&p->mutex);
>
> rcu_unregister_thread();
> - trace_multifd_send_thread_end(p->id, p->num_packets,
> p->total_normal_pages);
> + trace_multifd_send_thread_end(p->id, p->num_packets,
> p->total_normal_pages,
> + p->total_zero_pages);
>
> return NULL;
> }
> @@ -938,6 +954,7 @@ int multifd_save_setup(Error **errp)
> p->normal = g_new0(ram_addr_t, page_count);
> p->page_size = qemu_target_page_size();
> p->page_count = page_count;
> + p->zero = g_new0(ram_addr_t, page_count);
>
> if (migrate_use_zero_copy_send()) {
> p->write_flags = QIO_CHANNEL_WRITE_FLAG_ZERO_COPY;
> @@ -1046,6 +1063,8 @@ int multifd_load_cleanup(Error **errp)
> p->iov = NULL;
> g_free(p->normal);
> p->normal = NULL;
> + g_free(p->zero);
> + p->zero = NULL;
> multifd_recv_state->ops->recv_cleanup(p);
> }
> qemu_sem_destroy(&multifd_recv_state->sem_sync);
> @@ -1116,13 +1135,14 @@ static void *multifd_recv_thread(void *opaque)
> break;
> }
>
> - trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->flags,
> - p->next_packet_size);
> + trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->zero_num,
> + p->flags, p->next_packet_size);
> sync_needed = p->flags & MULTIFD_FLAG_SYNC;
> /* recv methods don't know how to handle the SYNC flag */
> p->flags &= ~MULTIFD_FLAG_SYNC;
> p->num_packets++;
> p->total_normal_pages += p->normal_num;
> + p->total_normal_pages += p->zero_num;
> qemu_mutex_unlock(&p->mutex);
>
> if (p->normal_num) {
> @@ -1147,7 +1167,8 @@ static void *multifd_recv_thread(void *opaque)
> qemu_mutex_unlock(&p->mutex);
>
> rcu_unregister_thread();
> - trace_multifd_recv_thread_end(p->id, p->num_packets,
> p->total_normal_pages);
> + trace_multifd_recv_thread_end(p->id, p->num_packets,
> p->total_normal_pages,
> + p->total_zero_pages);
>
> return NULL;
> }
> @@ -1187,6 +1208,7 @@ int multifd_load_setup(Error **errp)
> p->normal = g_new0(ram_addr_t, page_count);
> p->page_count = page_count;
> p->page_size = qemu_target_page_size();
> + p->zero = g_new0(ram_addr_t, page_count);
> }
>
> for (i = 0; i < thread_count; i++) {
> diff --git a/migration/ram.c b/migration/ram.c
> index 291ba5c0ed..2af70f517a 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -1412,8 +1412,6 @@ static int ram_save_multifd_page(RAMState *rs, RAMBlock
> *block,
> if (multifd_queue_page(rs->f, block, offset) < 0) {
> return -1;
> }
> - ram_counters.normal++;
> -
> return 1;
> }
>
> diff --git a/migration/trace-events b/migration/trace-events
> index a34afe7b85..d34aec177c 100644
> --- a/migration/trace-events
> +++ b/migration/trace-events
> @@ -120,21 +120,21 @@ postcopy_preempt_reset_channel(void) ""
>
> # multifd.c
> multifd_new_send_channel_async(uint8_t id) "channel %u"
> -multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags,
> uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " pages %u flags
> 0x%x next packet size %u"
> +multifd_recv(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t
> zero, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %"
> PRIu64 " normal pages %u zero pages %u flags 0x%x next packet size %u"
> multifd_recv_new_channel(uint8_t id) "channel %u"
> multifd_recv_sync_main(long packet_num) "packet num %ld"
> multifd_recv_sync_main_signal(uint8_t id) "channel %u"
> multifd_recv_sync_main_wait(uint8_t id) "channel %u"
> multifd_recv_terminate_threads(bool error) "error %d"
> -multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages)
> "channel %u packets %" PRIu64 " pages %" PRIu64
> +multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages,
> uint64_t zero_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 "
> zero pages %" PRIu64
> multifd_recv_thread_start(uint8_t id) "%u"
> -multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t
> flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal
> pages %u flags 0x%x next packet size %u"
> +multifd_send(uint8_t id, uint64_t packet_num, uint32_t normalpages, uint32_t
> zero_pages, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num
> %" PRIu64 " normal pages %u zero pages %u flags 0x%x next packet size %u"
> multifd_send_error(uint8_t id) "channel %u"
> multifd_send_sync_main(long packet_num) "packet num %ld"
> multifd_send_sync_main_signal(uint8_t id) "channel %u"
> multifd_send_sync_main_wait(uint8_t id) "channel %u"
> multifd_send_terminate_threads(bool error) "error %d"
> -multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages)
> "channel %u packets %" PRIu64 " normal pages %" PRIu64
> +multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages,
> uint64_t zero_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64
> " zero pages %" PRIu64
> multifd_send_thread_start(uint8_t id) "%u"
> multifd_tls_outgoing_handshake_start(void *ioc, void *tioc, const char
> *hostname) "ioc=%p tioc=%p hostname=%s"
> multifd_tls_outgoing_handshake_error(void *ioc, const char *err) "ioc=%p
> err=%s"
[Prev in Thread] |
Current Thread |
[Next in Thread] |
- Re: [PATCH v7 10/12] multifd: Support for zero pages transmission,
Leonardo Brás <=