/*
 * include/crypto/aes-round.h (qemu/armbru.git)
 * blob 854fb0966a6ade7941719bf2639566f0b2f8e9ab
 */
/*
 * AES round fragments, generic version
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Copyright (C) 2023 Linaro, Ltd.
 */
8 #ifndef CRYPTO_AES_ROUND_H
9 #define CRYPTO_AES_ROUND_H
11 /* Hosts with acceleration will usually need a 16-byte vector type. */
12 typedef uint8_t AESStateVec __attribute__((vector_size(16)));
14 typedef union {
15 uint8_t b[16];
16 uint32_t w[4];
17 uint64_t d[2];
18 AESStateVec v;
19 } AESState;
21 #include "host/crypto/aes-round.h"
24 * Perform MixColumns.
27 void aesenc_MC_gen(AESState *ret, const AESState *st);
28 void aesenc_MC_genrev(AESState *ret, const AESState *st);
30 static inline void aesenc_MC(AESState *r, const AESState *st, bool be)
32 if (HAVE_AES_ACCEL) {
33 aesenc_MC_accel(r, st, be);
34 } else if (HOST_BIG_ENDIAN == be) {
35 aesenc_MC_gen(r, st);
36 } else {
37 aesenc_MC_genrev(r, st);
42 * Perform SubBytes + ShiftRows + AddRoundKey.
45 void aesenc_SB_SR_AK_gen(AESState *ret, const AESState *st,
46 const AESState *rk);
47 void aesenc_SB_SR_AK_genrev(AESState *ret, const AESState *st,
48 const AESState *rk);
50 static inline void aesenc_SB_SR_AK(AESState *r, const AESState *st,
51 const AESState *rk, bool be)
53 if (HAVE_AES_ACCEL) {
54 aesenc_SB_SR_AK_accel(r, st, rk, be);
55 } else if (HOST_BIG_ENDIAN == be) {
56 aesenc_SB_SR_AK_gen(r, st, rk);
57 } else {
58 aesenc_SB_SR_AK_genrev(r, st, rk);
63 * Perform SubBytes + ShiftRows + MixColumns + AddRoundKey.
66 void aesenc_SB_SR_MC_AK_gen(AESState *ret, const AESState *st,
67 const AESState *rk);
68 void aesenc_SB_SR_MC_AK_genrev(AESState *ret, const AESState *st,
69 const AESState *rk);
71 static inline void aesenc_SB_SR_MC_AK(AESState *r, const AESState *st,
72 const AESState *rk, bool be)
74 if (HAVE_AES_ACCEL) {
75 aesenc_SB_SR_MC_AK_accel(r, st, rk, be);
76 } else if (HOST_BIG_ENDIAN == be) {
77 aesenc_SB_SR_MC_AK_gen(r, st, rk);
78 } else {
79 aesenc_SB_SR_MC_AK_genrev(r, st, rk);
84 * Perform InvMixColumns.
87 void aesdec_IMC_gen(AESState *ret, const AESState *st);
88 void aesdec_IMC_genrev(AESState *ret, const AESState *st);
90 static inline void aesdec_IMC(AESState *r, const AESState *st, bool be)
92 if (HAVE_AES_ACCEL) {
93 aesdec_IMC_accel(r, st, be);
94 } else if (HOST_BIG_ENDIAN == be) {
95 aesdec_IMC_gen(r, st);
96 } else {
97 aesdec_IMC_genrev(r, st);
102 * Perform InvSubBytes + InvShiftRows + AddRoundKey.
105 void aesdec_ISB_ISR_AK_gen(AESState *ret, const AESState *st,
106 const AESState *rk);
107 void aesdec_ISB_ISR_AK_genrev(AESState *ret, const AESState *st,
108 const AESState *rk);
110 static inline void aesdec_ISB_ISR_AK(AESState *r, const AESState *st,
111 const AESState *rk, bool be)
113 if (HAVE_AES_ACCEL) {
114 aesdec_ISB_ISR_AK_accel(r, st, rk, be);
115 } else if (HOST_BIG_ENDIAN == be) {
116 aesdec_ISB_ISR_AK_gen(r, st, rk);
117 } else {
118 aesdec_ISB_ISR_AK_genrev(r, st, rk);
123 * Perform InvSubBytes + InvShiftRows + AddRoundKey + InvMixColumns.
126 void aesdec_ISB_ISR_AK_IMC_gen(AESState *ret, const AESState *st,
127 const AESState *rk);
128 void aesdec_ISB_ISR_AK_IMC_genrev(AESState *ret, const AESState *st,
129 const AESState *rk);
131 static inline void aesdec_ISB_ISR_AK_IMC(AESState *r, const AESState *st,
132 const AESState *rk, bool be)
134 if (HAVE_AES_ACCEL) {
135 aesdec_ISB_ISR_AK_IMC_accel(r, st, rk, be);
136 } else if (HOST_BIG_ENDIAN == be) {
137 aesdec_ISB_ISR_AK_IMC_gen(r, st, rk);
138 } else {
139 aesdec_ISB_ISR_AK_IMC_genrev(r, st, rk);
144 * Perform InvSubBytes + InvShiftRows + InvMixColumns + AddRoundKey.
147 void aesdec_ISB_ISR_IMC_AK_gen(AESState *ret, const AESState *st,
148 const AESState *rk);
149 void aesdec_ISB_ISR_IMC_AK_genrev(AESState *ret, const AESState *st,
150 const AESState *rk);
152 static inline void aesdec_ISB_ISR_IMC_AK(AESState *r, const AESState *st,
153 const AESState *rk, bool be)
155 if (HAVE_AES_ACCEL) {
156 aesdec_ISB_ISR_IMC_AK_accel(r, st, rk, be);
157 } else if (HOST_BIG_ENDIAN == be) {
158 aesdec_ISB_ISR_IMC_AK_gen(r, st, rk);
159 } else {
160 aesdec_ISB_ISR_IMC_AK_genrev(r, st, rk);
164 #endif /* CRYPTO_AES_ROUND_H */