[PATCH v2 6/9] hw/arm/smmu-common: Manage IOTLB block entries
From: Eric Auger
Subject: [PATCH v2 6/9] hw/arm/smmu-common: Manage IOTLB block entries
Date: Thu, 2 Jul 2020 17:26:56 +0200
At the moment each entry in the IOTLB corresponds to a page sized
mapping (4K, 16K or 64K), even if the page belongs to a mapped
block. In the case of block mappings, this inefficiently consumes
IOTLB entries.
Change the value of the entry so that it reflects the actual
mapping it belongs to (block or page start address and size).
The level/tg of the entry is also encoded in the key. In subsequent
patches we will enable range invalidation, which provides the
level/tg of the entry.
Encoding the level/tg directly in the key will allow us to invalidate
with g_hash_table_remove() when num_pages equals 1.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
v1 -> v2:
- recompute starting_level
---
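For illustration only, not part of the patch: a minimal sketch of how a
block mapping ends up keyed, using the shift values added to
smmu-internal.h below. The key packs iova >> 12 in bits [39:0], the asid
in bits [55:40], the level in bits [57:56] and the tg in bits [59:58],
where tg = (granule_sz - 10) / 2, i.e. 1/2/3 for 4K/16K/64K granules.
The helper name make_key and the concrete values are hypothetical and
only stand in for smmu_get_iotlb_key():

#include <assert.h>
#include <stdint.h>

#define SMMU_IOTLB_ASID_SHIFT  40
#define SMMU_IOTLB_LEVEL_SHIFT 56
#define SMMU_IOTLB_TG_SHIFT    58

/* same packing as smmu_get_iotlb_key() in the hunk below */
static uint64_t make_key(uint16_t asid, uint64_t iova,
                         uint8_t tg, uint8_t level)
{
    return iova >> 12 | (uint64_t)asid << SMMU_IOTLB_ASID_SHIFT |
           (uint64_t)level << SMMU_IOTLB_LEVEL_SHIFT |
           (uint64_t)tg << SMMU_IOTLB_TG_SHIFT;
}

int main(void)
{
    /* A 2MB block (4K granule => tg 1, level 2) at IOVA 0x40200000 for
     * ASID 5 now takes a single IOTLB entry keyed on the block base,
     * instead of up to 512 page-sized entries. */
    assert(make_key(5, 0x40200000, 1, 2) == 0x0605000000040200ULL);
    return 0;
}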
hw/arm/smmu-internal.h | 9 ++++++
include/hw/arm/smmu-common.h | 8 +++--
hw/arm/smmu-common.c | 63 ++++++++++++++++++++++++++++--------
hw/arm/smmuv3.c | 6 ++--
hw/arm/trace-events | 2 +-
5 files changed, 67 insertions(+), 21 deletions(-)
diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h
index 2ecb6f1dc6..d57f5c05c2 100644
--- a/hw/arm/smmu-internal.h
+++ b/hw/arm/smmu-internal.h
@@ -97,7 +97,16 @@ uint64_t iova_level_offset(uint64_t iova, int inputsize,
}
#define SMMU_IOTLB_ASID_SHIFT 40
+#define SMMU_IOTLB_LEVEL_SHIFT 56
+#define SMMU_IOTLB_TG_SHIFT 58
#define SMMU_IOTLB_ASID(key) (((key) >> SMMU_IOTLB_ASID_SHIFT) & 0xFFFF)
#define SMMU_IOTLB_IOVA(key) (((key) & MAKE_64BIT_MASK(0, 40)) << 12)
+
+typedef struct SMMUIOTLBPageInvInfo {
+ int asid;
+ uint64_t iova;
+ uint64_t mask;
+} SMMUIOTLBPageInvInfo;
+
#endif
diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
index 0db4403e94..cc228b51b4 100644
--- a/include/hw/arm/smmu-common.h
+++ b/include/hw/arm/smmu-common.h
@@ -156,12 +156,14 @@ IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid);
#define SMMU_IOTLB_MAX_SIZE 256
-SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg, hwaddr iova);
+SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
+ SMMUTransTableInfo *tt, hwaddr iova);
void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *entry);
-SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova);
+SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova,
+ uint8_t tg, uint8_t level);
void smmu_iotlb_inv_all(SMMUState *s);
void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid);
-void smmu_iotlb_inv_iova(SMMUState *s, uint16_t asid, dma_addr_t iova);
+void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova);
/* Unmap the range of all the notifiers registered to any IOMMU mr */
void smmu_inv_notifiers_all(SMMUState *s);
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index a3301e56e7..5c14127a24 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -42,16 +42,35 @@ static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2)
return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}
-SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova)
+SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova,
+ uint8_t tg, uint8_t level)
{
- return iova >> 12 | (uint64_t)(asid) << SMMU_IOTLB_ASID_SHIFT;
+ return iova >> 12 | (uint64_t)(asid) << SMMU_IOTLB_ASID_SHIFT |
+ (uint64_t)(level) << SMMU_IOTLB_LEVEL_SHIFT |
+ (uint64_t)(tg) << SMMU_IOTLB_TG_SHIFT;
}
SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
- hwaddr iova)
+ SMMUTransTableInfo *tt, hwaddr iova)
{
- SMMUIOTLBKey key = smmu_get_iotlb_key(cfg->asid, iova);
- SMMUTLBEntry *entry = g_hash_table_lookup(bs->iotlb, &key);
+ uint8_t tg = (tt->granule_sz - 10) / 2;
+ uint8_t inputsize = 64 - tt->tsz;
+ uint8_t stride = tt->granule_sz - 3;
+ uint8_t level = 4 - (inputsize - 4) / stride;
+ SMMUTLBEntry *entry = NULL;
+
+ while (level <= 3) {
+ uint64_t subpage_size = 1ULL << level_shift(level, tt->granule_sz);
+ uint64_t mask = subpage_size - 1;
+ SMMUIOTLBKey key;
+
+ key = smmu_get_iotlb_key(cfg->asid, iova & ~mask, tg, level);
+ entry = g_hash_table_lookup(bs->iotlb, &key);
+ if (entry) {
+ break;
+ }
+ level++;
+ }
if (entry) {
cfg->iotlb_hits++;
@@ -72,13 +91,14 @@ SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new)
{
SMMUIOTLBKey *key = g_new0(SMMUIOTLBKey, 1);
+ uint8_t tg = (new->granule - 10) / 2;
if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
smmu_iotlb_inv_all(bs);
}
- *key = smmu_get_iotlb_key(cfg->asid, new->entry.iova);
- trace_smmu_iotlb_insert(cfg->asid, new->entry.iova);
+ *key = smmu_get_iotlb_key(cfg->asid, new->entry.iova, tg, new->level);
+ trace_smmu_iotlb_insert(cfg->asid, new->entry.iova, tg, new->level);
g_hash_table_insert(bs->iotlb, key, new);
}
@@ -97,12 +117,28 @@ static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value,
return SMMU_IOTLB_ASID(*iotlb_key) == asid;
}
-inline void smmu_iotlb_inv_iova(SMMUState *s, uint16_t asid, dma_addr_t iova)
+static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value,
+ gpointer user_data)
{
- SMMUIOTLBKey key = smmu_get_iotlb_key(asid, iova);
+ SMMUTLBEntry *iter = (SMMUTLBEntry *)value;
+ IOMMUTLBEntry *entry = &iter->entry;
+ SMMUIOTLBPageInvInfo *info = (SMMUIOTLBPageInvInfo *)user_data;
+ uint64_t *iotlb_key = (uint64_t *)key;
+
+ if (info->asid >= 0) {
+ return (info->asid == SMMU_IOTLB_ASID(*iotlb_key)) &&
+ ((info->iova & ~entry->addr_mask) == entry->iova);
+ } else {
+ return (info->iova & ~entry->addr_mask) == entry->iova;
+ }
+}
+
+inline void smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova)
+{
+ SMMUIOTLBPageInvInfo info = {.asid = asid, .iova = iova};
trace_smmu_iotlb_inv_iova(asid, iova);
- g_hash_table_remove(s->iotlb, &key);
+ g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid_iova, &info);
}
inline void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)
@@ -229,9 +265,6 @@ static int smmu_ptw_64(SMMUTransCfg *cfg,
baseaddr = extract64(tt->ttb, 0, 48);
baseaddr &= ~indexmask;
- tlbe->entry.iova = iova;
- tlbe->entry.addr_mask = (1 << granule_sz) - 1;
-
while (level <= 3) {
uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
uint64_t mask = subpage_size - 1;
@@ -281,7 +314,9 @@ static int smmu_ptw_64(SMMUTransCfg *cfg,
goto error;
}
- tlbe->entry.translated_addr = gpa + (iova & mask);
+ tlbe->entry.translated_addr = gpa;
+ tlbe->entry.iova = iova & ~mask;
+ tlbe->entry.addr_mask = mask;
tlbe->entry.perm = PTE_AP_TO_PERM(ap);
tlbe->level = level;
tlbe->granule = granule_sz;
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index db74d27add..b717bde832 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -674,7 +674,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
page_mask = (1ULL << (tt->granule_sz)) - 1;
aligned_addr = addr & ~page_mask;
- cached_entry = smmu_iotlb_lookup(bs, cfg, aligned_addr);
+ cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr);
if (cached_entry) {
if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
status = SMMU_TRANS_ERROR;
@@ -744,7 +744,7 @@ epilogue:
case SMMU_TRANS_SUCCESS:
entry.perm = flag;
entry.translated_addr = cached_entry->entry.translated_addr +
- (addr & page_mask);
+ (addr & cached_entry->entry.addr_mask);
entry.addr_mask = cached_entry->entry.addr_mask;
trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
entry.translated_addr, entry.perm);
@@ -972,7 +972,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
smmuv3_inv_notifiers_iova(bs, -1, addr);
- smmu_iotlb_inv_all(bs);
+ smmu_iotlb_inv_iova(bs, -1, addr);
break;
}
case SMMU_CMD_TLBI_NH_VA:
diff --git a/hw/arm/trace-events b/hw/arm/trace-events
index b808a1bfc1..f74d3e920f 100644
--- a/hw/arm/trace-events
+++ b/hw/arm/trace-events
@@ -16,7 +16,7 @@ smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr
smmu_inv_notifiers_mr(const char *name) "iommu mr=%s"
smmu_iotlb_lookup_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
smmu_iotlb_lookup_miss(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
-smmu_iotlb_insert(uint16_t asid, uint64_t addr) "IOTLB ++ asid=%d addr=0x%"PRIx64
+smmu_iotlb_insert(uint16_t asid, uint64_t addr, uint8_t tg, uint8_t level) "IOTLB ++ asid=%d addr=0x%"PRIx64" tg=%d level=%d"
# smmuv3.c
smmuv3_read_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)"
--
2.21.3