[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH 2/2] tpm_emulator: Have swtpm relock storage upon migration f
From: |
Marc-André Lureau |
Subject: |
Re: [PATCH 2/2] tpm_emulator: Have swtpm relock storage upon migration fall-back |
Date: |
Thu, 8 Sep 2022 10:04:32 +0400 |
Hi
On Thu, Sep 8, 2022 at 2:28 AM Stefan Berger <stefanb@linux.ibm.com> wrote:
>
> Swtpm may release the lock once the last one of its state blobs has been
> migrated out. In case of VM migration failure QEMU now needs to notify
> swtpm that it should again take the lock, which it can otherwise only do
> once it has received the first TPM command from the VM.
>
> Only try to send the lock command if swtpm supports it. It will not have
> released the lock (and support shared storage setups) if it doesn't
> support the locking command since the functionality of releasing the lock
> upon state blob reception and the lock command were added to swtpm
> 'together'.
>
> If QEMU sends the lock command and the storage has already been locked
> no error is reported.
>
> If swtpm does not receive the lock command (from older version of QEMU),
> it will lock the storage once the first TPM command has been received. So
> sending the lock command is an optimization.
>
> Signed-off-by: Stefan Berger <stefanb@linux.ibm.com>
> ---
> backends/tpm/tpm_emulator.c | 60 ++++++++++++++++++++++++++++++++++++-
> backends/tpm/trace-events | 2 ++
> 2 files changed, 61 insertions(+), 1 deletion(-)
>
> diff --git a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c
> index 87d061e9bb..905ebfc8a9 100644
> --- a/backends/tpm/tpm_emulator.c
> +++ b/backends/tpm/tpm_emulator.c
> @@ -34,6 +34,7 @@
> #include "io/channel-socket.h"
> #include "sysemu/tpm_backend.h"
> #include "sysemu/tpm_util.h"
> +#include "sysemu/runstate.h"
> #include "tpm_int.h"
> #include "tpm_ioctl.h"
> #include "migration/blocker.h"
> @@ -81,6 +82,9 @@ struct TPMEmulator {
> unsigned int established_flag_cached:1;
>
> TPMBlobBuffers state_blobs;
> +
> + bool relock_storage;
> + VMChangeStateEntry *vmstate;
> };
>
> struct tpm_error {
> @@ -302,6 +306,35 @@ static int tpm_emulator_stop_tpm(TPMBackend *tb)
> return 0;
> }
>
> +static int tpm_emulator_lock_storage(TPMEmulator *tpm_emu)
> +{
> + ptm_lockstorage pls;
> +
> + if (!TPM_EMULATOR_IMPLEMENTS_ALL_CAPS(tpm_emu, PTM_CAP_LOCK_STORAGE)) {
> + trace_tpm_emulator_lock_storage_cmd_not_supt();
> + return 0;
> + }
> +
> + /* give failing side 100 * 10ms time to release lock */
> + pls.u.req.retries = cpu_to_be32(100);
That's quite a short time imho. Is it going to try to lock it again
when the first command comes in? What's the timeout then? Is it
handled implicitly by the swtpm process?
> + if (tpm_emulator_ctrlcmd(tpm_emu, CMD_LOCK_STORAGE, &pls,
> + sizeof(pls.u.req), sizeof(pls.u.resp)) < 0) {
> + error_report("tpm-emulator: Could not lock storage: %s",
add "after 1 second" ?
> + strerror(errno));
> + return -1;
> + }
> +
> + pls.u.resp.tpm_result = be32_to_cpu(pls.u.resp.tpm_result);
> + if (pls.u.resp.tpm_result != 0) {
> + error_report("tpm-emulator: TPM result for CMD_LOCK_STORAGE: 0x%x
> %s",
> + pls.u.resp.tpm_result,
> + tpm_emulator_strerror(pls.u.resp.tpm_result));
> + return -1;
> + }
> +
> + return 0;
> +}
> +
> static int tpm_emulator_set_buffer_size(TPMBackend *tb,
> size_t wanted_size,
> size_t *actual_size)
> @@ -843,13 +876,34 @@ static int tpm_emulator_pre_save(void *opaque)
> {
> TPMBackend *tb = opaque;
> TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
> + int ret;
>
> trace_tpm_emulator_pre_save();
>
> tpm_backend_finish_sync(tb);
>
> /* get the state blobs from the TPM */
> - return tpm_emulator_get_state_blobs(tpm_emu);
> + ret = tpm_emulator_get_state_blobs(tpm_emu);
> +
> + tpm_emu->relock_storage = ret == 0;
> +
> + return ret;
> +}
> +
> +static void tpm_emulator_vm_state_change(void *opaque, bool running,
> + RunState state)
> +{
> + TPMBackend *tb = opaque;
> + TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
> +
> + trace_tpm_emulator_vm_state_change(running, state);
> +
> + if (!running || state != RUN_STATE_RUNNING || !tpm_emu->relock_storage) {
> + return;
> + }
> +
> + /* lock storage after migration fall-back */
> + tpm_emulator_lock_storage(tpm_emu);
> }
>
> /*
> @@ -911,6 +965,9 @@ static void tpm_emulator_inst_init(Object *obj)
> tpm_emu->options = g_new0(TPMEmulatorOptions, 1);
> tpm_emu->cur_locty_number = ~0;
> qemu_mutex_init(&tpm_emu->mutex);
> + tpm_emu->vmstate =
> + qemu_add_vm_change_state_handler(tpm_emulator_vm_state_change,
> + tpm_emu);
>
> vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY,
> &vmstate_tpm_emulator, obj);
> @@ -960,6 +1017,7 @@ static void tpm_emulator_inst_finalize(Object *obj)
> tpm_sized_buffer_reset(&state_blobs->savestate);
>
> qemu_mutex_destroy(&tpm_emu->mutex);
> + qemu_del_vm_change_state_handler(tpm_emu->vmstate);
>
> vmstate_unregister(NULL, &vmstate_tpm_emulator, obj);
> }
> diff --git a/backends/tpm/trace-events b/backends/tpm/trace-events
> index 3298766dd7..1ecef42a07 100644
> --- a/backends/tpm/trace-events
> +++ b/backends/tpm/trace-events
> @@ -20,6 +20,8 @@ tpm_emulator_set_buffer_size(uint32_t buffersize, uint32_t
> minsize, uint32_t max
> tpm_emulator_startup_tpm_resume(bool is_resume, size_t buffersize)
> "is_resume: %d, buffer size: %zu"
> tpm_emulator_get_tpm_established_flag(uint8_t flag) "got established flag:
> %d"
> tpm_emulator_cancel_cmd_not_supt(void) "Backend does not support
> CANCEL_TPM_CMD"
> +tpm_emulator_lock_storage_cmd_not_supt(void) "Backend does not support
> LOCK_STORAGE"
> +tpm_emulator_vm_state_change(int running, int state) "state change to
> running %d state %d"
> tpm_emulator_handle_device_opts_tpm12(void) "TPM Version 1.2"
> tpm_emulator_handle_device_opts_tpm2(void) "TPM Version 2"
> tpm_emulator_handle_device_opts_unspec(void) "TPM Version Unspecified"
> --
> 2.37.2
>
lgtm otherwise:
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>