From: Richard Henderson
Subject: [PATCH 31/35] host/include/aarch64: Implement aes-round.h
Date: Fri, 2 Jun 2023 19:34:22 -0700

Detect AES in cpuinfo; implement the accel hooks.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
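(Reviewer note, not part of the commit message.)  These are the *_accel
hooks declared by the generic aes-round.h; a wrapper is expected to test
HAVE_AES_ACCEL and fall back to the table-driven code when the host lacks
the instructions.  A minimal sketch of such a dispatch, with
aesenc_SB_SR_MC_AK_gen standing in as a placeholder name for the generic
fallback:

    static inline void
    aesenc_SB_SR_MC_AK(AESState *r, const AESState *st,
                       const AESState *rk, bool be)
    {
        if (HAVE_AES_ACCEL) {                          /* hook from this patch */
            aesenc_SB_SR_MC_AK_accel(r, st, rk, be);
        } else {
            aesenc_SB_SR_MC_AK_gen(r, st, rk, be);     /* placeholder fallback */
        }
    }

When built with the AES extension enabled (__ARM_FEATURE_AES defined), the
test folds to constant true; otherwise it is a likely() branch on the
cpuinfo bit filled in below.
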
host/include/aarch64/host/aes-round.h | 204 ++++++++++++++++++++++++++
host/include/aarch64/host/cpuinfo.h | 1 +
util/cpuinfo-aarch64.c | 2 +
3 files changed, 207 insertions(+)
create mode 100644 host/include/aarch64/host/aes-round.h
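(A second reviewer note.)  The be paths byte-reverse the state around each
instruction because the generic AESState may be held in big-endian order.
There is no single arm_neon.h intrinsic for a full 16-byte reverse, hence
the shufflevector/shuffle builtins in aes_accel_bswap; an equivalent
formulation using one TBL lookup would be, as a sketch only (not what this
patch uses):

    #include <arm_neon.h>

    /* Sketch: full 128-bit byte reverse via a single TBL lookup. */
    static inline uint8x16_t bswap128_tbl(uint8x16_t x)
    {
        const uint8x16_t rev = { 15, 14, 13, 12, 11, 10, 9, 8,
                                  7,  6,  5,  4,  3,  2,  1, 0 };
        return vqtbl1q_u8(x, rev);
    }

Also worth keeping in mind while reviewing: AESE/AESD xor the round key
into the state *before* SubBytes/ShiftRows (resp. their inverses), so
passing an all-zero key vector yields the bare SB_SR / ISB_ISR transforms,
and the _AK_ variants add the round key with an explicit veorq_u8.
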
diff --git a/host/include/aarch64/host/aes-round.h b/host/include/aarch64/host/aes-round.h
new file mode 100644
index 0000000000..27ca823db6
--- /dev/null
+++ b/host/include/aarch64/host/aes-round.h
@@ -0,0 +1,204 @@
+/*
+ * AArch64-specific AES acceleration.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HOST_AES_ROUND_H
+#define HOST_AES_ROUND_H
+
+#include "host/cpuinfo.h"
+#include <arm_neon.h>
+
+#ifdef __ARM_FEATURE_AES
+# define HAVE_AES_ACCEL true
+# define ATTR_AES_ACCEL
+#else
+# define HAVE_AES_ACCEL likely(cpuinfo & CPUINFO_AES)
+# define ATTR_AES_ACCEL __attribute__((target("+crypto")))
+#endif
+
+static inline uint8x16_t aes_accel_bswap(uint8x16_t x)
+{
+ /* No arm_neon.h primitive, and the compilers don't share builtins. */
+#ifdef __clang__
+ return __builtin_shufflevector(x, x, 15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0);
+#else
+ return __builtin_shuffle(x, (uint8x16_t)
+ { 15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0, });
+#endif
+}
+
+/*
+ * Through clang 15, the aes inlines are only defined if __ARM_FEATURE_AES;
+ * one cannot use __attribute__((target)) to make them appear after the fact.
+ * Therefore we must fall back to inline asm.
+ */
+#ifdef __ARM_FEATURE_AES
+# define aes_accel_aesd vaesdq_u8
+# define aes_accel_aese vaeseq_u8
+# define aes_accel_aesmc vaesmcq_u8
+# define aes_accel_aesimc vaesimcq_u8
+#else
+static inline uint8x16_t aes_accel_aesd(uint8x16_t d, uint8x16_t k)
+{
+ asm(".arch_extension aes\n\t"
+ "aesd %0.16b, %1.16b" : "+w"(d) : "w"(k));
+ return d;
+}
+
+static inline uint8x16_t aes_accel_aese(uint8x16_t d, uint8x16_t k)
+{
+ asm(".arch_extension aes\n\t"
+ "aese %0.16b, %1.16b" : "+w"(d) : "w"(k));
+ return d;
+}
+
+static inline uint8x16_t aes_accel_aesmc(uint8x16_t d)
+{
+ asm(".arch_extension aes\n\t"
+ "aesmc %0.16b, %1.16b" : "=w"(d) : "w"(d));
+ return d;
+}
+
+static inline uint8x16_t aes_accel_aesimc(uint8x16_t d)
+{
+ asm(".arch_extension aes\n\t"
+ "aesimc %0.16b, %1.16b" : "=w"(d) : "w"(d));
+ return d;
+}
+#endif /* __ARM_FEATURE_AES */
+
+static inline void ATTR_AES_ACCEL
+aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
+{
+ uint8x16_t t = (uint8x16_t)st->v;
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ t = aes_accel_aesmc(t);
+ t = aes_accel_bswap(t);
+ } else {
+ t = aes_accel_aesmc(t);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_SB_SR_accel(AESState *ret, const AESState *st, bool be)
+{
+ uint8x16_t t = (uint8x16_t)st->v;
+ uint8x16_t z = { };
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ t = aes_accel_aese(t, z);
+ t = aes_accel_bswap(t);
+ } else {
+ t = aes_accel_aese(t, z);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ uint8x16_t t = (uint8x16_t)st->v;
+ uint8x16_t k = (uint8x16_t)rk->v;
+ uint8x16_t z = { };
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ k = aes_accel_bswap(k);
+ t = aes_accel_aese(t, z);
+ t = aes_accel_aesmc(t);
+ t = veorq_u8(t, k);
+ t = aes_accel_bswap(t);
+ } else {
+ t = aes_accel_aese(t, z);
+ t = aes_accel_aesmc(t);
+ t = veorq_u8(t, k);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
+{
+ uint8x16_t t = (uint8x16_t)st->v;
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ t = aes_accel_aesimc(t);
+ t = aes_accel_bswap(t);
+ } else {
+ t = aes_accel_aesimc(t);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_accel(AESState *ret, const AESState *st, bool be)
+{
+ uint8x16_t t = (uint8x16_t)st->v;
+ uint8x16_t z = { };
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ t = aes_accel_aesd(t, z);
+ t = aes_accel_bswap(t);
+ } else {
+ t = aes_accel_aesd(t, z);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ uint8x16_t t = (uint8x16_t)st->v;
+ uint8x16_t k = (uint8x16_t)rk->v;
+ uint8x16_t z = { };
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ k = aes_accel_bswap(k);
+ t = aes_accel_aesd(t, z);
+ t = veorq_u8(t, k);
+ t = aes_accel_aesimc(t);
+ t = aes_accel_bswap(t);
+ } else {
+ t = aes_accel_aesd(t, z);
+ t = veorq_u8(t, k);
+ t = aes_accel_aesimc(t);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ uint8x16_t t = (uint8x16_t)st->v;
+ uint8x16_t k = (uint8x16_t)rk->v;
+ uint8x16_t z = { };
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ k = aes_accel_bswap(k);
+ t = aes_accel_aesd(t, z);
+ t = aes_accel_aesimc(t);
+ t = veorq_u8(t, k);
+ t = aes_accel_bswap(t);
+ } else {
+ t = aes_accel_aesd(t, z);
+ t = aes_accel_aesimc(t);
+ t = veorq_u8(t, k);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+#endif
diff --git a/host/include/aarch64/host/cpuinfo.h b/host/include/aarch64/host/cpuinfo.h
index 82227890b4..05feeb4f43 100644
--- a/host/include/aarch64/host/cpuinfo.h
+++ b/host/include/aarch64/host/cpuinfo.h
@@ -9,6 +9,7 @@
#define CPUINFO_ALWAYS (1u << 0) /* so cpuinfo is nonzero */
#define CPUINFO_LSE (1u << 1)
#define CPUINFO_LSE2 (1u << 2)
+#define CPUINFO_AES (1u << 3)
/* Initialized with a constructor. */
extern unsigned cpuinfo;
diff --git a/util/cpuinfo-aarch64.c b/util/cpuinfo-aarch64.c
index f99acb7884..ababc39550 100644
--- a/util/cpuinfo-aarch64.c
+++ b/util/cpuinfo-aarch64.c
@@ -56,10 +56,12 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
info |= (hwcap & HWCAP_ATOMICS ? CPUINFO_LSE : 0);
info |= (hwcap & HWCAP_USCAT ? CPUINFO_LSE2 : 0);
+ info |= (hwcap & HWCAP_AES ? CPUINFO_AES : 0);
#endif
#ifdef CONFIG_DARWIN
info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE") * CPUINFO_LSE;
info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE2") * CPUINFO_LSE2;
+ info |= sysctl_for_bool("hw.optional.arm.FEAT_AES") * CPUINFO_AES;
#endif
cpuinfo = info;
--
2.34.1