/*
 * AES round fragments, generic version
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Copyright (C) 2023 Linaro, Ltd.
 */

#ifndef CRYPTO_AES_ROUND_H
#define CRYPTO_AES_ROUND_H

/* Hosts with acceleration will usually need a 16-byte vector type. */
typedef uint8_t AESStateVec __attribute__((vector_size(16)));
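
/*
 * The 128-bit AES state.  The fragments below only require a 16-byte
 * state that can alias the host vector type; the exact member layout
 * given here is an assumption, chosen to match that use.
 */
typedef union {
    uint8_t b[16];
    uint32_t w[4];
    uint64_t d[2];
    AESStateVec v;
} AESState;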

#include "host/crypto/aes-round.h"
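
/*
 * Each round fragment comes in three forms:
 *
 *   <name>_accel  - host-accelerated version, declared by the
 *                   "host/crypto/aes-round.h" header included above and
 *                   usable when HAVE_AES_ACCEL is true;
 *   <name>_gen    - generic C version, operating on the state in host
 *                   byte order;
 *   <name>_genrev - generic C version, operating on a byte-reversed state.
 *
 * The inline wrappers below dispatch between them: the accelerated form
 * when available, otherwise _gen when the requested endianness 'be'
 * matches HOST_BIG_ENDIAN, and _genrev when it does not.
 */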

/*
 * Perform MixColumns.
 */

void aesenc_MC_gen(AESState *ret, const AESState *st);
void aesenc_MC_genrev(AESState *ret, const AESState *st);

static inline void aesenc_MC(AESState *r, const AESState *st, bool be)
{
    if (HAVE_AES_ACCEL) {
        aesenc_MC_accel(r, st, be);
    } else if (HOST_BIG_ENDIAN == be) {
        aesenc_MC_gen(r, st);
    } else {
        aesenc_MC_genrev(r, st);
    }
}

/*
 * Perform SubBytes + ShiftRows + AddRoundKey.
 */

void aesenc_SB_SR_AK_gen(AESState *ret, const AESState *st,
                         const AESState *rk);
void aesenc_SB_SR_AK_genrev(AESState *ret, const AESState *st,
                            const AESState *rk);

static inline void aesenc_SB_SR_AK(AESState *r, const AESState *st,
                                   const AESState *rk, bool be)
{
    if (HAVE_AES_ACCEL) {
        aesenc_SB_SR_AK_accel(r, st, rk, be);
    } else if (HOST_BIG_ENDIAN == be) {
        aesenc_SB_SR_AK_gen(r, st, rk);
    } else {
        aesenc_SB_SR_AK_genrev(r, st, rk);
    }
}

/*
 * Perform SubBytes + ShiftRows + MixColumns + AddRoundKey.
 */

void aesenc_SB_SR_MC_AK_gen(AESState *ret, const AESState *st,
                            const AESState *rk);
void aesenc_SB_SR_MC_AK_genrev(AESState *ret, const AESState *st,
                               const AESState *rk);

static inline void aesenc_SB_SR_MC_AK(AESState *r, const AESState *st,
                                      const AESState *rk, bool be)
{
    if (HAVE_AES_ACCEL) {
        aesenc_SB_SR_MC_AK_accel(r, st, rk, be);
    } else if (HOST_BIG_ENDIAN == be) {
        aesenc_SB_SR_MC_AK_gen(r, st, rk);
    } else {
        aesenc_SB_SR_MC_AK_genrev(r, st, rk);
    }
}
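
/*
 * Illustration only: a full AES-128 encryption could be composed from the
 * fragments above.  The sketch assumes the caller has expanded the key into
 * 11 AESState round keys ('round_key', 'input' and 'output' are not part of
 * this header) in the byte order passed as 'be', and that AESState has the
 * assumed layout given earlier:
 *
 *     AESState x = *input, y;
 *
 *     x.v ^= round_key[0].v;                           // initial AddRoundKey
 *     for (int i = 1; i < 10; i++) {
 *         aesenc_SB_SR_MC_AK(&y, &x, &round_key[i], be);   // rounds 1..9
 *         x = y;
 *     }
 *     aesenc_SB_SR_AK(&y, &x, &round_key[10], be);     // final round, no MixColumns
 *     *output = y;
 */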

/*
 * Perform InvMixColumns.
 */

void aesdec_IMC_gen(AESState *ret, const AESState *st);
void aesdec_IMC_genrev(AESState *ret, const AESState *st);

static inline void aesdec_IMC(AESState *r, const AESState *st, bool be)
{
    if (HAVE_AES_ACCEL) {
        aesdec_IMC_accel(r, st, be);
    } else if (HOST_BIG_ENDIAN == be) {
        aesdec_IMC_gen(r, st);
    } else {
        aesdec_IMC_genrev(r, st);
    }
}

/*
 * Perform InvSubBytes + InvShiftRows + AddRoundKey.
 */

void aesdec_ISB_ISR_AK_gen(AESState *ret, const AESState *st,
                           const AESState *rk);
void aesdec_ISB_ISR_AK_genrev(AESState *ret, const AESState *st,
                              const AESState *rk);

static inline void aesdec_ISB_ISR_AK(AESState *r, const AESState *st,
                                     const AESState *rk, bool be)
{
    if (HAVE_AES_ACCEL) {
        aesdec_ISB_ISR_AK_accel(r, st, rk, be);
    } else if (HOST_BIG_ENDIAN == be) {
        aesdec_ISB_ISR_AK_gen(r, st, rk);
    } else {
        aesdec_ISB_ISR_AK_genrev(r, st, rk);
    }
}

/*
 * Perform InvSubBytes + InvShiftRows + AddRoundKey + InvMixColumns.
 */

void aesdec_ISB_ISR_AK_IMC_gen(AESState *ret, const AESState *st,
                               const AESState *rk);
void aesdec_ISB_ISR_AK_IMC_genrev(AESState *ret, const AESState *st,
                                  const AESState *rk);

static inline void aesdec_ISB_ISR_AK_IMC(AESState *r, const AESState *st,
                                         const AESState *rk, bool be)
{
    if (HAVE_AES_ACCEL) {
        aesdec_ISB_ISR_AK_IMC_accel(r, st, rk, be);
    } else if (HOST_BIG_ENDIAN == be) {
        aesdec_ISB_ISR_AK_IMC_gen(r, st, rk);
    } else {
        aesdec_ISB_ISR_AK_IMC_genrev(r, st, rk);
    }
}

/*
 * Perform InvSubBytes + InvShiftRows + InvMixColumns + AddRoundKey.
 */

void aesdec_ISB_ISR_IMC_AK_gen(AESState *ret, const AESState *st,
                               const AESState *rk);
void aesdec_ISB_ISR_IMC_AK_genrev(AESState *ret, const AESState *st,
                                  const AESState *rk);

static inline void aesdec_ISB_ISR_IMC_AK(AESState *r, const AESState *st,
                                         const AESState *rk, bool be)
{
    if (HAVE_AES_ACCEL) {
        aesdec_ISB_ISR_IMC_AK_accel(r, st, rk, be);
    } else if (HOST_BIG_ENDIAN == be) {
        aesdec_ISB_ISR_IMC_AK_gen(r, st, rk);
    } else {
        aesdec_ISB_ISR_IMC_AK_genrev(r, st, rk);
    }
}
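
/*
 * Note on the two inverse-round orderings above: the straightforward
 * inverse cipher of FIPS-197 applies AddRoundKey before InvMixColumns
 * (aesdec_ISB_ISR_AK_IMC), while the equivalent inverse cipher applies
 * InvMixColumns first and uses round keys that have themselves been passed
 * through InvMixColumns (aesdec_ISB_ISR_IMC_AK, with aesdec_IMC available
 * for transforming the keys).
 */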

#endif /* CRYPTO_AES_ROUND_H */