qemu-arm

Re: [PATCH for-8.2 2/2] arm/kvm: convert to kvm_get_one_reg


From: Gavin Shan
Subject: Re: [PATCH for-8.2 2/2] arm/kvm: convert to kvm_get_one_reg
Date: Mon, 24 Jul 2023 12:35:25 +1000
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Thunderbird/102.12.0

Hi Connie,

On 7/18/23 21:14, Cornelia Huck wrote:
We can neaten the code by switching the callers that work on a
CPUState to the kvm_get_one_reg function.

Signed-off-by: Cornelia Huck <cohuck@redhat.com>
---
  target/arm/kvm.c   | 15 +++---------
  target/arm/kvm64.c | 57 ++++++++++++----------------------------------
  2 files changed, 18 insertions(+), 54 deletions(-)
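(For context while reviewing: if I read patch 1/2 of the series correctly,
the helper the callers are being converted to is essentially a thin wrapper
that bundles the id/addr pair and the vcpu ioctl in one place. A rough
sketch from memory -- the exact error handling/tracing may differ:

/*
 * Sketch of the kvm_get_one_reg() helper introduced earlier in the
 * series (see patch 1/2 for the real implementation).
 */
int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
{
    struct kvm_one_reg reg = {
        .id   = id,
        .addr = (uintptr_t)target,   /* destination buffer for the value */
    };

    /* one vcpu ioctl instead of open-coded struct setup at every caller */
    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
}
)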


The replacements look good to me. However, I guess it's worth applying
the same replacement to the remaining KVM_GET_ONE_REG calls in
target/arm/kvm64.c since we're here?

[gshan@gshan arm]$ pwd
/home/gshan/sandbox/q/target/arm
[gshan@gshan arm]$ git grep KVM_GET_ONE_REG
kvm64.c:    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
kvm64.c:    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
kvm64.c:        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
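(For reference, those remaining sites query host registers through a raw
scratch-vcpu fd rather than a CPUState, so a conversion there would need an
fd-based flavour of the helper. From memory, one of them looks roughly
like the following sketch; details may differ:

/*
 * Sketch of one remaining call site in target/arm/kvm64.c: it reads a
 * host sysreg through a scratch vcpu fd, with no CPUState available.
 */
static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = {
        .id   = id,
        .addr = (uintptr_t)pret,
    };

    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}
)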

Thanks,
Gavin

diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index cdbffc3c6e0d..4123f6dc9d72 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -525,24 +525,19 @@ bool write_kvmstate_to_list(ARMCPU *cpu)
      bool ok = true;

      for (i = 0; i < cpu->cpreg_array_len; i++) {
-        struct kvm_one_reg r;
          uint64_t regidx = cpu->cpreg_indexes[i];
          uint32_t v32;
          int ret;

-        r.id = regidx;
-
          switch (regidx & KVM_REG_SIZE_MASK) {
          case KVM_REG_SIZE_U32:
-            r.addr = (uintptr_t)&v32;
-            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+            ret = kvm_get_one_reg(cs, regidx, &v32);
              if (!ret) {
                  cpu->cpreg_values[i] = v32;
              }
              break;
          case KVM_REG_SIZE_U64:
-            r.addr = (uintptr_t)(cpu->cpreg_values + i);
-            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+            ret = kvm_get_one_reg(cs, regidx, cpu->cpreg_values + i);
              break;
          default:
              g_assert_not_reached();
@@ -678,17 +673,13 @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
  void kvm_arm_get_virtual_time(CPUState *cs)
  {
      ARMCPU *cpu = ARM_CPU(cs);
-    struct kvm_one_reg reg = {
-        .id = KVM_REG_ARM_TIMER_CNT,
-        .addr = (uintptr_t)&cpu->kvm_vtime,
-    };
      int ret;

      if (cpu->kvm_vtime_dirty) {
          return;
      }

-    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    ret = kvm_get_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
      if (ret) {
          error_report("Failed to get KVM_REG_ARM_TIMER_CNT");
          abort();
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index b4d02dff5381..66b52d6f8d23 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -908,14 +908,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
  static int kvm_arch_get_fpsimd(CPUState *cs)
  {
      CPUARMState *env = &ARM_CPU(cs)->env;
-    struct kvm_one_reg reg;
      int i, ret;

      for (i = 0; i < 32; i++) {
          uint64_t *q = aa64_vfp_qreg(env, i);
-        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
-        reg.addr = (uintptr_t)q;
-        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+        ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
          if (ret) {
              return ret;
          } else {
@@ -939,15 +936,12 @@ static int kvm_arch_get_sve(CPUState *cs)
  {
      ARMCPU *cpu = ARM_CPU(cs);
      CPUARMState *env = &cpu->env;
-    struct kvm_one_reg reg;
      uint64_t *r;
      int n, ret;

      for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
          r = &env->vfp.zregs[n].d[0];
-        reg.addr = (uintptr_t)r;
-        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
-        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+        ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
          if (ret) {
              return ret;
          }
@@ -956,9 +950,7 @@ static int kvm_arch_get_sve(CPUState *cs)

      for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
          r = &env->vfp.pregs[n].p[0];
-        reg.addr = (uintptr_t)r;
-        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
-        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+        ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
          if (ret) {
              return ret;
          }
@@ -966,9 +958,7 @@ static int kvm_arch_get_sve(CPUState *cs)
      }

      r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
-    reg.addr = (uintptr_t)r;
-    reg.id = KVM_REG_ARM64_SVE_FFR(0);
-    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
      if (ret) {
          return ret;
      }
@@ -979,7 +969,6 @@ static int kvm_arch_get_sve(CPUState *cs)

  int kvm_arch_get_registers(CPUState *cs)
  {
-    struct kvm_one_reg reg;
      uint64_t val;
      unsigned int el;
      uint32_t fpr;
@@ -989,31 +978,24 @@ int kvm_arch_get_registers(CPUState *cs)
      CPUARMState *env = &cpu->env;

      for (i = 0; i < 31; i++) {
-        reg.id = AARCH64_CORE_REG(regs.regs[i]);
-        reg.addr = (uintptr_t) &env->xregs[i];
-        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+        ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
+                              &env->xregs[i]);
          if (ret) {
              return ret;
          }
      }

-    reg.id = AARCH64_CORE_REG(regs.sp);
-    reg.addr = (uintptr_t) &env->sp_el[0];
-    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
      if (ret) {
          return ret;
      }

-    reg.id = AARCH64_CORE_REG(sp_el1);
-    reg.addr = (uintptr_t) &env->sp_el[1];
-    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
      if (ret) {
          return ret;
      }

-    reg.id = AARCH64_CORE_REG(regs.pstate);
-    reg.addr = (uintptr_t) &val;
-    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
      if (ret) {
          return ret;
      }
@@ -1030,9 +1012,7 @@ int kvm_arch_get_registers(CPUState *cs)
       */
      aarch64_restore_sp(env, 1);

-    reg.id = AARCH64_CORE_REG(regs.pc);
-    reg.addr = (uintptr_t) &env->pc;
-    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
      if (ret) {
          return ret;
      }
@@ -1046,9 +1026,7 @@ int kvm_arch_get_registers(CPUState *cs)
          aarch64_sync_64_to_32(env);
      }

-    reg.id = AARCH64_CORE_REG(elr_el1);
-    reg.addr = (uintptr_t) &env->elr_el[1];
-    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
      if (ret) {
          return ret;
      }
@@ -1058,9 +1036,8 @@ int kvm_arch_get_registers(CPUState *cs)
       * KVM SPSRs 0-4 map to QEMU banks 1-5
       */
      for (i = 0; i < KVM_NR_SPSR; i++) {
-        reg.id = AARCH64_CORE_REG(spsr[i]);
-        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
-        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+        ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
+                              &env->banked_spsr[i + 1]);
          if (ret) {
              return ret;
          }
@@ -1081,17 +1058,13 @@ int kvm_arch_get_registers(CPUState *cs)
          return ret;
      }

-    reg.addr = (uintptr_t)(&fpr);
-    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
-    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
      if (ret) {
          return ret;
      }
      vfp_set_fpsr(env, fpr);

-    reg.addr = (uintptr_t)(&fpr);
-    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
-    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
      if (ret) {
          return ret;
      }



