From: Wang, Lei
Subject: Re: [PATCH v5 15/65] i386/tdx: Get tdx_capabilities via KVM_TDX_CAPABILITIES
Date: Tue, 19 Mar 2024 10:43:04 +0800
User-agent: Mozilla Thunderbird
On 2/29/2024 14:36, Xiaoyao Li wrote:
> KVM provides TDX capabilities via sub command KVM_TDX_CAPABILITIES of
> IOCTL(KVM_MEMORY_ENCRYPT_OP). Get the capabilities when initializing
> TDX context. It will be used to validate user's setting later.
>
> Since there is no interface reporting how many cpuid configs contains in
> KVM_TDX_CAPABILITIES, QEMU chooses to try starting with a known number
> and abort when it exceeds KVM_MAX_CPUID_ENTRIES.
>
> Besides, introduce the interfaces to invoke TDX "ioctls" at different
> scope (KVM, VM and VCPU) in preparation.
tdx_platform_ioctl() was dropped because it has no user, so I suggest rephrasing
this statement: no KVM-scope ioctl interface is introduced in this patch.
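
For reference, if a KVM-scope wrapper is ever needed again, I'd expect it to be
just a third level next to the VM and vCPU ones. A purely hypothetical sketch,
not part of this patch: TDX_PLATFORM_IOCTL and tdx_platform_ioctl() are assumed
names, and issuing the command on the KVM system fd via kvm_ioctl() is my
assumption about what the dropped v3 helper did.

/* Hypothetical sketch only: mirrors tdx_vm_ioctl()/tdx_vcpu_ioctl(), but would
 * issue the command on the KVM system fd via kvm_ioctl(). Not introduced by
 * this patch; the v3 helper was removed for lack of users.
 */
enum tdx_ioctl_level {
    TDX_PLATFORM_IOCTL,    /* assumed name for the KVM-scope level */
    TDX_VM_IOCTL,
    TDX_VCPU_IOCTL,
};

static inline int tdx_platform_ioctl(int cmd_id, __u32 flags, void *data)
{
    return tdx_ioctl_internal(NULL, TDX_PLATFORM_IOCTL, cmd_id, flags, data);
}

/* ...and the corresponding case in tdx_ioctl_internal():
 *
 *     case TDX_PLATFORM_IOCTL:
 *         r = kvm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
 *         break;
 */
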
>
> Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
> ---
> Changes in v4:
> - use {} to initialize struct kvm_tdx_cmd, to avoid memset();
> - remove tdx_platform_ioctl() because no user;
>
> Changes in v3:
> - rename __tdx_ioctl() to tdx_ioctl_internal()
> - Pass errp in get_tdx_capabilities();
>
> changes in v2:
> - Make the error message more clear;
>
> changes in v1:
> - start from nr_cpuid_configs = 6 for the loop;
> - stop the loop when nr_cpuid_configs exceeds KVM_MAX_CPUID_ENTRIES;
> ---
> target/i386/kvm/kvm.c | 2 -
> target/i386/kvm/kvm_i386.h | 2 +
> target/i386/kvm/tdx.c | 91 +++++++++++++++++++++++++++++++++++++-
> 3 files changed, 92 insertions(+), 3 deletions(-)
>
> diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
> index 52d99d30bdc8..0e68e80f4291 100644
> --- a/target/i386/kvm/kvm.c
> +++ b/target/i386/kvm/kvm.c
> @@ -1685,8 +1685,6 @@ static int hyperv_init_vcpu(X86CPU *cpu)
>
> static Error *invtsc_mig_blocker;
>
> -#define KVM_MAX_CPUID_ENTRIES 100
> -
> static void kvm_init_xsave(CPUX86State *env)
> {
> if (has_xsave2) {
> diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h
> index 55fb25fa8e2e..c3ef46a97a7b 100644
> --- a/target/i386/kvm/kvm_i386.h
> +++ b/target/i386/kvm/kvm_i386.h
> @@ -13,6 +13,8 @@
>
> #include "sysemu/kvm.h"
>
> +#define KVM_MAX_CPUID_ENTRIES 100
> +
> #ifdef CONFIG_KVM
>
> #define kvm_pit_in_kernel() \
> diff --git a/target/i386/kvm/tdx.c b/target/i386/kvm/tdx.c
> index d9a1dd46dc69..2b956450a083 100644
> --- a/target/i386/kvm/tdx.c
> +++ b/target/i386/kvm/tdx.c
> @@ -12,18 +12,107 @@
> */
>
> #include "qemu/osdep.h"
> +#include "qemu/error-report.h"
> +#include "qapi/error.h"
> #include "qom/object_interfaces.h"
> +#include "sysemu/kvm.h"
>
> #include "hw/i386/x86.h"
> +#include "kvm_i386.h"
> #include "tdx.h"
>
> +static struct kvm_tdx_capabilities *tdx_caps;
> +
> +enum tdx_ioctl_level{
> + TDX_VM_IOCTL,
> + TDX_VCPU_IOCTL,
> +};
> +
> +static int tdx_ioctl_internal(void *state, enum tdx_ioctl_level level, int cmd_id,
> + __u32 flags, void *data)
> +{
> + struct kvm_tdx_cmd tdx_cmd = {};
> + int r;
> +
> + tdx_cmd.id = cmd_id;
> + tdx_cmd.flags = flags;
> + tdx_cmd.data = (__u64)(unsigned long)data;
> +
> + switch (level) {
> + case TDX_VM_IOCTL:
> + r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
> + break;
> + case TDX_VCPU_IOCTL:
> + r = kvm_vcpu_ioctl(state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
> + break;
> + default:
> + error_report("Invalid tdx_ioctl_level %d", level);
> + exit(1);
> + }
> +
> + return r;
> +}
> +
> +static inline int tdx_vm_ioctl(int cmd_id, __u32 flags, void *data)
> +{
> + return tdx_ioctl_internal(NULL, TDX_VM_IOCTL, cmd_id, flags, data);
> +}
> +
> +static inline int tdx_vcpu_ioctl(void *vcpu_fd, int cmd_id, __u32 flags,
> + void *data)
> +{
> + return tdx_ioctl_internal(vcpu_fd, TDX_VCPU_IOCTL, cmd_id, flags, data);
> +}
> +
> +static int get_tdx_capabilities(Error **errp)
> +{
> + struct kvm_tdx_capabilities *caps;
> + /* 1st generation of TDX reports 6 cpuid configs */
> + int nr_cpuid_configs = 6;
> + size_t size;
> + int r;
> +
> + do {
> + size = sizeof(struct kvm_tdx_capabilities) +
> + nr_cpuid_configs * sizeof(struct kvm_tdx_cpuid_config);
> + caps = g_malloc0(size);
> + caps->nr_cpuid_configs = nr_cpuid_configs;
> +
> + r = tdx_vm_ioctl(KVM_TDX_CAPABILITIES, 0, caps);
> + if (r == -E2BIG) {
> + g_free(caps);
> + nr_cpuid_configs *= 2;
> + if (nr_cpuid_configs > KVM_MAX_CPUID_ENTRIES) {
> +                error_setg(errp, "%s: KVM TDX seems broken that number of CPUID "
> +                           "entries in kvm_tdx_capabilities exceeds limit %d",
> +                           __func__, KVM_MAX_CPUID_ENTRIES);
> + return r;
> + }
> + } else if (r < 0) {
> + g_free(caps);
> +            error_setg_errno(errp, -r, "%s: KVM_TDX_CAPABILITIES failed",
> +                             __func__);
> + return r;
> + }
> + }
> + while (r == -E2BIG);
> +
> + tdx_caps = caps;
> +
> + return 0;
> +}
> +
> static int tdx_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
> {
> MachineState *ms = MACHINE(qdev_get_machine());
> + int r = 0;
>
> ms->require_guest_memfd = true;
>
> - return 0;
> + if (!tdx_caps) {
> + r = get_tdx_capabilities(errp);
> + }
> +
> + return r;
> }
>
> /* tdx guest */
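
One more note on the -E2BIG retry, mainly for readers following along: starting
from 6 and doubling means get_tdx_capabilities() asks KVM for 6, 12, 24, 48 and
96 cpuid configs; if 96 is still too small, the next doubling (192) exceeds
KVM_MAX_CPUID_ENTRIES (100) and the function fails, so there are at most five
KVM_TDX_CAPABILITIES calls. A standalone user-space sketch of that pattern,
with a hypothetical probe_nr_entries() standing in for the ioctl:

/* Standalone sketch of the doubling retry used by get_tdx_capabilities().
 * probe_nr_entries() is a hypothetical stand-in for KVM_TDX_CAPABILITIES
 * returning -E2BIG when the caller-provided array is too small.
 */
#include <errno.h>
#include <stdio.h>

#define KVM_MAX_CPUID_ENTRIES 100

static int probe_nr_entries(int nr_requested)
{
    const int nr_available = 23;    /* pretend KVM has 23 cpuid configs */

    return nr_requested < nr_available ? -E2BIG : 0;
}

int main(void)
{
    int nr = 6;                     /* 1st generation of TDX reports 6 */
    int r;

    do {
        r = probe_nr_entries(nr);
        if (r == -E2BIG) {
            nr *= 2;
            if (nr > KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "nr_cpuid_configs exceeds limit %d\n",
                        KVM_MAX_CPUID_ENTRIES);
                return 1;
            }
        }
    } while (r == -E2BIG);

    printf("succeeded with nr_cpuid_configs = %d\n", nr);
    return 0;
}

Compiled on its own this converges at 24 in the example above; the real code of
course also reallocates the caps buffer on each retry.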