From: Haozhong Zhang
Subject: [Qemu-devel] [PATCH v2 5/8] migration/ram: ensure write persistence on loading zero pages to PMEM
Date: Wed, 7 Feb 2018 15:33:28 +0800
When loading a zero page, check whether it will be loaded to persistent
memory. If yes, load it with the libpmem function pmem_memset_nodrain().
Combined with a call to pmem_drain() at the end of RAM loading, this
guarantees that all those zero pages are persistently loaded.

Depending on the host HW/SW configuration, pmem_drain() can be an
"sfence". Therefore, we do not call pmem_drain() after each
pmem_memset_nodrain(), nor use pmem_memset_persist() (equivalent to
pmem_memset_nodrain() + pmem_drain()), in order to avoid unnecessary
overhead.
Signed-off-by: Haozhong Zhang <address@hidden>
---
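A note on the batching pattern used below (an illustrative sketch only,
not part of the patch; the pmem mapping is assumed to be set up
elsewhere, e.g. via pmem_map_file()):

  #include <libpmem.h>
  #include <stddef.h>

  /* Zero a run of pages on pmem, paying the drain cost only once. */
  static void zero_pages_batched(void *pmem_base, size_t page_size,
                                 size_t npages)
  {
      size_t i;

      for (i = 0; i < npages; i++) {
          /* non-draining memset: no fence after each page */
          pmem_memset_nodrain((char *)pmem_base + i * page_size, 0,
                              page_size);
      }
      /* one drain (e.g. an sfence) makes all stores above persistent */
      pmem_drain();
  }
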
include/qemu/pmem.h | 9 +++++++++
migration/ram.c | 34 +++++++++++++++++++++++++++++-----
2 files changed, 38 insertions(+), 5 deletions(-)
diff --git a/include/qemu/pmem.h b/include/qemu/pmem.h
index 9017596ff0..861d8ecc21 100644
--- a/include/qemu/pmem.h
+++ b/include/qemu/pmem.h
@@ -26,6 +26,15 @@ pmem_memcpy_persist(void *pmemdest, const void *src, size_t len)
     return memcpy(pmemdest, src, len);
 }
 
+static inline void *pmem_memset_nodrain(void *pmemdest, int c, size_t len)
+{
+    return memset(pmemdest, c, len);
+}
+
+static inline void pmem_drain(void)
+{
+}
+
 #endif /* CONFIG_LIBPMEM */
 
 #endif /* !QEMU_PMEM_H */
diff --git a/migration/ram.c b/migration/ram.c
index cb1950f3eb..5a0e503818 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -49,6 +49,7 @@
 #include "qemu/rcu_queue.h"
 #include "migration/colo.h"
 #include "migration/block.h"
+#include "qemu/pmem.h"
 
 /***********************************************************/
 /* ram save/restore */
@@ -2467,6 +2468,20 @@ static inline void *host_from_ram_block_offset(RAMBlock *block,
     return block->host + offset;
 }
 
+static void ram_handle_compressed_common(void *host, uint8_t ch, uint64_t size,
+                                         bool is_pmem)
+{
+    if (!ch && is_zero_range(host, size)) {
+        return;
+    }
+
+    if (!is_pmem) {
+        memset(host, ch, size);
+    } else {
+        pmem_memset_nodrain(host, ch, size);
+    }
+}
+
 /**
  * ram_handle_compressed: handle the zero page case
  *
@@ -2479,9 +2494,7 @@ static inline void *host_from_ram_block_offset(RAMBlock *block,
  */
 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
 {
-    if (ch != 0 || !is_zero_range(host, size)) {
-        memset(host, ch, size);
-    }
+    return ram_handle_compressed_common(host, ch, size, false);
 }
 
 static void *do_data_decompress(void *opaque)
@@ -2823,6 +2836,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
     bool postcopy_running = postcopy_is_running();
     /* ADVISE is earlier, it shows the source has the postcopy capability on */
     bool postcopy_advised = postcopy_is_advised();
+    bool need_pmem_drain = false;
 
     seq_iter++;
 
@@ -2848,6 +2862,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
         ram_addr_t addr, total_ram_bytes;
         void *host = NULL;
         uint8_t ch;
+        RAMBlock *block = NULL;
+        bool is_pmem = false;
 
         addr = qemu_get_be64(f);
         flags = addr & ~TARGET_PAGE_MASK;
@@ -2864,7 +2880,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
 
         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
-            RAMBlock *block = ram_block_from_stream(f, flags);
+            block = ram_block_from_stream(f, flags);
 
             host = host_from_ram_block_offset(block, addr);
             if (!host) {
@@ -2874,6 +2890,9 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
             }
             ramblock_recv_bitmap_set(block, host);
             trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
+
+            is_pmem = ramblock_is_pmem(block);
+            need_pmem_drain = need_pmem_drain || is_pmem;
         }
 
         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
@@ -2927,7 +2946,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
 
         case RAM_SAVE_FLAG_ZERO:
             ch = qemu_get_byte(f);
-            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
+            ram_handle_compressed_common(host, ch, TARGET_PAGE_SIZE, is_pmem);
             break;
 
         case RAM_SAVE_FLAG_PAGE:
@@ -2970,6 +2989,11 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
     }
 
     wait_for_decompress_done();
+
+    if (need_pmem_drain) {
+        pmem_drain();
+    }
+
     rcu_read_unlock();
     trace_ram_load_complete(ret, seq_iter);
     return ret;
--
2.14.1
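
For reference, the new path is taken when the destination RAM block comes
from a file-backed memory backend marked as pmem (the 'pmem' option from
patch 2/8) and QEMU is built with libpmem (patch 3/8). A sketch of a
destination-side command line that would exercise it (option spellings
here are illustrative, not taken from this patch):

  qemu-system-x86_64 -machine pc,nvdimm=on \
      -m 4G,slots=4,maxmem=32G \
      -object memory-backend-file,id=mem1,share=on,pmem=on,mem-path=/mnt/pmem0/nv0,size=4G \
      -device nvdimm,id=nv0,memdev=mem1 \
      -incoming tcp:0:4444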