[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 5/9] accel/tcg: Add tlb_flush_page_bits_by_mmuidx_all_cpus()
From: |
Philippe Mathieu-Daudé |
Subject: |
[PATCH 5/9] accel/tcg: Add tlb_flush_page_bits_by_mmuidx_all_cpus() |
Date: |
Sun, 9 May 2021 17:16:14 +0200 |
From: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
---
include/exec/exec-all.h | 13 +++++++++++++
accel/tcg/cputlb.c | 24 +++++++++++++++++-------
2 files changed, 30 insertions(+), 7 deletions(-)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 5a5f6d4c1a8..9a3dbb7ec08 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -276,6 +276,12 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
target_ulong len, uint16_t idxmap,
unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+ target_ulong len, uint16_t idxmap,
+ unsigned bits);
+
/**
* tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for
@@ -384,6 +390,13 @@ static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
unsigned bits)
{
}
+static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
+ target_ulong addr,
+ target_ulong len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
#endif
/**
* probe_access:
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 16924ceb777..5314349ef9d 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -870,16 +870,18 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}
-void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
- target_ulong addr,
- uint16_t idxmap,
- unsigned bits)
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
+ target_ulong addr, target_ulong len,
+ uint16_t idxmap, unsigned bits)
{
TLBFlushRangeData d;
run_on_cpu_data runon;
- /* If all bits are significant, this devolves to tlb_flush_page. */
- if (bits >= TARGET_LONG_BITS) {
+ /*
+ * If all bits are significant, and len is small,
+ * this devolves to tlb_flush_page.
+ */
+ if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
return;
}
@@ -891,7 +893,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
- d.len = TARGET_PAGE_SIZE;
+ d.len = len;
d.idxmap = idxmap;
d.bits = bits;
@@ -914,6 +916,14 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d);
}
+void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
+ target_ulong addr,
+ uint16_t idxmap, unsigned bits)
+{
+ tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
+ idxmap, bits);
+}
+
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
target_ulong addr,
uint16_t idxmap,
--
2.26.3
- [PATCH 0/9] accel/tcg: Add tlb_flush interface for a range of pages, Philippe Mathieu-Daudé, 2021/05/09
- [PATCH 1/9] accel/tcg: Replace g_new() + memcpy() by g_memdup(), Philippe Mathieu-Daudé, 2021/05/09
- [PATCH 2/9] accel/tcg: Pass length argument to tlb_flush_range_locked(), Philippe Mathieu-Daudé, 2021/05/09
- [PATCH 3/9] accel/tlb: Rename TLBFlushPageBitsByMMUIdxData -> TLBFlushRangeData, Philippe Mathieu-Daudé, 2021/05/09
- [PATCH 4/9] accel/tcg: Add tlb_flush_range_by_mmuidx(), Philippe Mathieu-Daudé, 2021/05/09
- [PATCH 5/9] accel/tcg: Add tlb_flush_page_bits_by_mmuidx_all_cpus(), Philippe Mathieu-Daudé <=
- [PATCH 6/9] accel/tlb: Add tlb_flush_range_by_mmuidx_all_cpus_synced(), Philippe Mathieu-Daudé, 2021/05/09
- [PATCH 7/9] accel/tcg: Rename tlb_flush_[page_bits -> range]_by_mmuidx_async_0, Philippe Mathieu-Daudé, 2021/05/09
- [PATCH 8/9] accel/tlb: Rename tlb_flush_[page_bits > range]_by_mmuidx_async_[2 > 1], Philippe Mathieu-Daudé, 2021/05/09
- [PATCH 9/9] accel/tcg: Remove tlb_flush_page_bits_by_mmuidx_async_1() ???, Philippe Mathieu-Daudé, 2021/05/09
- Re: [PATCH 0/9] accel/tcg: Add tlb_flush interface for a range of pages, Philippe Mathieu-Daudé, 2021/05/09