qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH 9/9] cputlb: Hoist timestamp outside of loops over tlbs


From: Philippe Mathieu-Daudé
Subject: Re: [PATCH 9/9] cputlb: Hoist timestamp outside of loops over tlbs
Date: Mon, 20 Jan 2020 10:02:56 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Thunderbird/68.2.2

On 1/9/20 3:49 AM, Richard Henderson wrote:
Do not call get_clock_realtime() in tlb_mmu_resize_locked,
but hoist outside of any loop over a set of tlbs.  This is
only two (indirect) callers, tlb_flush_by_mmuidx_async_work
and tlb_flush_page_locked, so not onerous.

Signed-off-by: Richard Henderson <address@hidden>

Reviewed-by: Philippe Mathieu-Daudé <address@hidden>

---
  accel/tcg/cputlb.c | 14 ++++++++------
  1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 761e9d44d7..9f6cb36921 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -137,12 +137,12 @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
   * high), since otherwise we are likely to have a significant amount of
   * conflict misses.
   */
-static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
+                                  int64_t now)
  {
      size_t old_size = tlb_n_entries(fast);
      size_t rate;
      size_t new_size = old_size;
-    int64_t now = get_clock_realtime();
      int64_t window_len_ms = 100;
      int64_t window_len_ns = window_len_ms * 1000 * 1000;
      bool window_expired = now > desc->window_begin_ns + window_len_ns;
@@ -222,12 +222,13 @@ static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
      memset(desc->vtable, -1, sizeof(desc->vtable));
  }
-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
+                                        int64_t now)
  {
      CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
      CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
- tlb_mmu_resize_locked(desc, fast);
+    tlb_mmu_resize_locked(desc, fast, now);
      tlb_mmu_flush_locked(desc, fast);
  }
@@ -310,6 +311,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
      CPUArchState *env = cpu->env_ptr;
      uint16_t asked = data.host_int;
      uint16_t all_dirty, work, to_clean;
+    int64_t now = get_clock_realtime();
      assert_cpu_is_self(cpu);
@@ -324,7 +326,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
      for (work = to_clean; work != 0; work &= work - 1) {
          int mmu_idx = ctz32(work);
-        tlb_flush_one_mmuidx_locked(env, mmu_idx);
+        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
      }
      qemu_spin_unlock(&env_tlb(env)->c.lock);
@@ -446,7 +448,7 @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
          tlb_debug("forcing full flush midx %d ("
                    TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                    midx, lp_addr, lp_mask);
-        tlb_flush_one_mmuidx_locked(env, midx);
+        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
      } else {
          if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
              tlb_n_used_entries_dec(env, midx);





reply via email to

[Prev in Thread] Current Thread [Next in Thread]