[PATCH v10 20/21] i386/cpu: Use CPUCacheInfo.share_level to encode CPUID[4]

From: Zhao Liu
Subject: [PATCH v10 20/21] i386/cpu: Use CPUCacheInfo.share_level to encode CPUID[4]
Date: Thu, 21 Mar 2024 22:40:47 +0800
From: Zhao Liu <zhao1.liu@intel.com>
CPUID[4].EAX[bits 25:14] is used to represent the cache topology for
Intel CPUs.
Now that cache models carry topology information, we can use
CPUCacheInfo.share_level to decide which topology level to encode
into CPUID[4].EAX[bits 25:14].
And since the helper max_thread_ids_for_cache() derives the field
CPUID[4].EAX[bits 25:14] (the original variable "num_apic_ids") from
the CPU topology levels, which are verified when parsing -smp, there is
no need to check this value with "assert(num_apic_ids > 0)" again, so
remove this assert().
Additionally, wrap the encoding of CPUID[4].EAX[bits 31:26] into a
helper to make the code cleaner.
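For illustration, here is a standalone sketch (not part of the patch) of
how the share level maps to these two encodings, assuming a sample
topology of 2 threads per core, 4 cores per die and 2 dies per package.
The log2_ceil() helper and the offset arithmetic below only mirror the
APIC ID layout this series describes; they are not QEMU's actual
helpers:

#include <stdio.h>

static unsigned log2_ceil(unsigned n)
{
    unsigned w = 0;

    while ((1u << w) < n) {
        w++;
    }
    return w;
}

int main(void)
{
    unsigned threads = 2, cores = 4, dies = 2;
    unsigned core_offset = log2_ceil(threads);              /* 1 */
    unsigned die_offset = core_offset + log2_ceil(cores);   /* 3 */
    unsigned pkg_offset = die_offset + log2_ceil(dies);     /* 4 */

    /* EAX[bits 25:14]: max addressable thread IDs sharing the cache, minus 1 */
    printf("core-level share: %u\n", (1u << core_offset) - 1);  /* 1 */
    printf("die-level share:  %u\n", (1u << die_offset) - 1);   /* 7 */
    printf("pkg-level share:  %u\n", (1u << pkg_offset) - 1);   /* 15 */

    /* EAX[bits 31:26]: max addressable core IDs in the package, minus 1 */
    printf("cores in package: %u\n",
           (1u << (pkg_offset - core_offset)) - 1);             /* 7 */
    return 0;
}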
Tested-by: Yongwei Ma <yongwei.ma@intel.com>
Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
Tested-by: Babu Moger <babu.moger@amd.com>
---
Changes since v7:
* Renamed max_processor_ids_for_cache() to max_thread_ids_for_cache().
(Xiaoyao)
* Dropped Michael's/Babu's Acked-by/Tested-by tags since the code
changed.
* Re-added Yongwei's Tested-by tag for his re-testing.
Changes since v1:
* Used "enum CPUTopoLevel share_level" as the parameter in
max_processor_ids_for_cache().
* Made cache_into_passthrough case also use
max_processor_ids_for_cache() and max_core_ids_in_package() to
encode CPUID[4]. (Yanan)
* Renamed the title of this patch (the original is "i386: Use
CPUCacheInfo.share_level to encode CPUID[4].EAX[bits 25:14]").
---
target/i386/cpu.c | 84 +++++++++++++++++++++++++----------------------
1 file changed, 45 insertions(+), 39 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 5f6f72fc849f..831957e4a06f 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -235,22 +235,53 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
0 /* Invalid value */)
+static uint32_t max_thread_ids_for_cache(X86CPUTopoInfo *topo_info,
+ enum CPUTopoLevel share_level)
+{
+ uint32_t num_ids = 0;
+
+ switch (share_level) {
+ case CPU_TOPO_LEVEL_CORE:
+ num_ids = 1 << apicid_core_offset(topo_info);
+ break;
+ case CPU_TOPO_LEVEL_DIE:
+ num_ids = 1 << apicid_die_offset(topo_info);
+ break;
+ case CPU_TOPO_LEVEL_PACKAGE:
+ num_ids = 1 << apicid_pkg_offset(topo_info);
+ break;
+ default:
+ /*
+ * Currently there is no use case for SMT and MODULE, so use
+ * assert directly to facilitate debugging.
+ */
+ g_assert_not_reached();
+ }
+
+ return num_ids - 1;
+}
+
+static uint32_t max_core_ids_in_package(X86CPUTopoInfo *topo_info)
+{
+ uint32_t num_cores = 1 << (apicid_pkg_offset(topo_info) -
+ apicid_core_offset(topo_info));
+ return num_cores - 1;
+}
/* Encode cache info for CPUID[4] */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
- int num_apic_ids, int num_cores,
+ X86CPUTopoInfo *topo_info,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
{
assert(cache->size == cache->line_size * cache->associativity *
cache->partitions * cache->sets);
- assert(num_apic_ids > 0);
*eax = CACHE_TYPE(cache->type) |
CACHE_LEVEL(cache->level) |
(cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
- ((num_cores - 1) << 26) |
- ((num_apic_ids - 1) << 14);
+ (max_core_ids_in_package(topo_info) << 26) |
+ (max_thread_ids_for_cache(topo_info, cache->share_level) << 14);
assert(cache->line_size > 0);
assert(cache->partitions > 0);
@@ -6244,18 +6275,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
(cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
(cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
break;
- case 4: {
- /*
- * CPUID.04H:EAX[bits 25:14]: Maximum number of addressable IDs for
- * logical processors sharing this cache.
- */
- int addressable_threads_width;
- /*
- * CPUID.04H:EAX[bits 31:26]: Maximum number of addressable IDs for
- * processor cores in the physical package.
- */
- int addressable_cores_width;
-
+ case 4:
/* cache info: needed for Core compatibility */
if (cpu->cache_info_passthrough) {
x86_cpu_get_cache_cpuid(index, count, eax, ebx, ecx, edx);
@@ -6267,55 +6287,42 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14);
if (cores_per_pkg > 1) {
- addressable_cores_width = apicid_pkg_offset(&topo_info) -
- apicid_core_offset(&topo_info);
-
*eax &= ~0xFC000000;
- *eax |= ((1 << addressable_cores_width) - 1) << 26;
+ *eax |= max_core_ids_in_package(&topo_info) << 26;
}
if (host_vcpus_per_cache > threads_per_pkg) {
- /* Share the cache at package level. */
- addressable_threads_width = apicid_pkg_offset(&topo_info);
-
*eax &= ~0x3FFC000;
- *eax |= ((1 << addressable_threads_width) - 1) << 14;
+
+ /* Share the cache at package level. */
+ *eax |= max_thread_ids_for_cache(&topo_info,
+ CPU_TOPO_LEVEL_PACKAGE) << 14;
}
}
} else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
*eax = *ebx = *ecx = *edx = 0;
} else {
*eax = 0;
- addressable_cores_width = apicid_pkg_offset(&topo_info) -
- apicid_core_offset(&topo_info);
switch (count) {
case 0: /* L1 dcache info */
- addressable_threads_width = apicid_core_offset(&topo_info);
encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
- (1 << addressable_threads_width),
- (1 << addressable_cores_width),
+ &topo_info,
eax, ebx, ecx, edx);
break;
case 1: /* L1 icache info */
- addressable_threads_width = apicid_core_offset(&topo_info);
encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
- (1 << addressable_threads_width),
- (1 << addressable_cores_width),
+ &topo_info,
eax, ebx, ecx, edx);
break;
case 2: /* L2 cache info */
- addressable_threads_width = apicid_core_offset(&topo_info);
encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
- (1 << addressable_threads_width),
- (1 << addressable_cores_width),
+ &topo_info,
eax, ebx, ecx, edx);
break;
case 3: /* L3 cache info */
if (cpu->enable_l3_cache) {
- addressable_threads_width = apicid_die_offset(&topo_info);
encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
- (1 << addressable_threads_width),
- (1 << addressable_cores_width),
+ &topo_info,
eax, ebx, ecx, edx);
break;
}
@@ -6326,7 +6333,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
}
break;
- }
case 5:
/* MONITOR/MWAIT Leaf */
*eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
--
2.34.1
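A side note for readers of the cache_info_passthrough hunk above:
0xFC000000 is the mask for EAX[bits 31:26] and 0x3FFC000 the mask for
EAX[bits 25:14]. A minimal sketch (not QEMU code, all values
hypothetical) of that clear-and-set pattern:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t eax = 0x04004121;   /* hypothetical host CPUID[4].EAX */
    uint32_t max_cores = 7;      /* e.g. max_core_ids_in_package() */
    uint32_t max_threads = 15;   /* e.g. package-level
                                  * max_thread_ids_for_cache() */

    eax &= ~0xFC000000u;         /* clear bits 31:26 */
    eax |= max_cores << 26;      /* patch in core ID count - 1 */

    eax &= ~0x3FFC000u;          /* clear bits 25:14 */
    eax |= max_threads << 14;    /* patch in shared-thread ID count - 1 */

    printf("patched EAX = 0x%08x\n", eax);
    return 0;
}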