On 08/16/2011 08:56 PM, Umesh Deshpande wrote:
@@ -2128,8 +2132,61 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                       start1, length);
         }
     }
+
 }
+void migration_bitmap_reset_dirty(ram_addr_t start, ram_addr_t end,
+                                  int dirty_flags)
+{
+    unsigned long length, start1;
+
+    start &= TARGET_PAGE_MASK;
+    end = TARGET_PAGE_ALIGN(end);
+
+    length = end - start;
+    if (length == 0) {
+        return;
+    }
+
+    migration_bitmap_mask_dirty_range(start, length, dirty_flags);
+
+    /* we modify the TLB cache so that the dirty bit will be set again
+       when accessing the range */
The comment does not apply here, and the code below can also be safely
deleted.
+    start1 = (unsigned long)qemu_safe_ram_ptr(start);
+    /* Check that we don't span multiple blocks - this breaks the
+       address comparisons below. */
+    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
+        != (end - 1) - start) {
+        abort();
+    }
+}
+
+void sync_migration_bitmap(ram_addr_t start, ram_addr_t end)
+{
+    unsigned long length, len, i;
+    ram_addr_t addr;
+    start &= TARGET_PAGE_MASK;
+    end = TARGET_PAGE_ALIGN(end);
+
+    length = end - start;
+    if (length == 0) {
+        return;
+    }
+
+    len = length >> TARGET_PAGE_BITS;
+    for (i = 0; i < len; i++) {
+        addr = i << TARGET_PAGE_BITS;
+        if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
+            migration_bitmap_set_dirty(addr);
+            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
+                                            MIGRATION_DIRTY_FLAG);
This should be run under the iothread lock. Pay attention to avoiding
lock inversion: the I/O thread always takes the iothread lock outside
and the ramlist lock within, so the migration thread must do the same.
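
In the migration thread that would look roughly like this (only a sketch;
I am assuming the ramlist mutex from your patch is taken through a
qemu_mutex_lock_ramlist()/qemu_mutex_unlock_ramlist() pair):

    /* Same ordering as the I/O thread: iothread lock first, ramlist
       lock second, so the two threads cannot deadlock against each
       other. */
    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();

    sync_migration_bitmap(0, TARGET_PHYS_ADDR_MAX);

    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();
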
BTW, I think this code in the migration thread patch also needs the
iothread lock:
    if (stage < 0) {
        cpu_physical_memory_set_dirty_tracking(0);
        return 0;
    }
    if (cpu_physical_sync_dirty_bitmap(0, TARGET_PHYS_ADDR_MAX) != 0) {
        qemu_file_set_error(f);
        return 0;
    }
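
i.e. something like this (untested sketch; it just wraps the existing calls
and remembers to drop the lock on the early returns):

    qemu_mutex_lock_iothread();
    if (stage < 0) {
        cpu_physical_memory_set_dirty_tracking(0);
        qemu_mutex_unlock_iothread();
        return 0;
    }
    if (cpu_physical_sync_dirty_bitmap(0, TARGET_PHYS_ADDR_MAX) != 0) {
        qemu_file_set_error(f);
        qemu_mutex_unlock_iothread();
        return 0;
    }
    qemu_mutex_unlock_iothread();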