/*
 * Carry-less multiply operations.
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Copyright (C) 2023 Linaro, Ltd.
 */
#include "qemu/osdep.h"
#include "crypto/clmul.h"
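
/*
 * Perform eight 8x8->8 carry-less multiplies in parallel, one per byte.
 * A carry-less multiply is a schoolbook multiply in which the partial
 * products are combined with XOR instead of addition (multiplication of
 * polynomials over GF(2)); only the low 8 bits of each product are kept.
 */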
uint64_t clmul_8x8_low(uint64_t n, uint64_t m)
{
    uint64_t r = 0;

    for (int i = 0; i < 8; ++i) {
        /* Replicate bit 0 of each byte of n across that byte. */
        uint64_t mask = (n & 0x0101010101010101ull) * 0xff;
        r ^= m & mask;
        /* Shift each byte of m left by one without crossing byte lanes. */
        m = (m << 1) & 0xfefefefefefefefeull;
        n >>= 1;
    }
    return r;
}
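
/*
 * Helper for four 8x8->16 carry-less multiplies, operating on inputs
 * whose 8-bit operands have already been isolated into the low half
 * of each 16-bit lane by the caller.
 */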
static uint64_t clmul_8x4_even_int(uint64_t n, uint64_t m)
{
    uint64_t r = 0;

    for (int i = 0; i < 8; ++i) {
        /* Replicate bit 0 of each 16-bit lane of n across that lane. */
        uint64_t mask = (n & 0x0001000100010001ull) * 0xffff;
        r ^= m & mask;
        n >>= 1;
        m <<= 1;
    }
    return r;
}
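
/*
 * Perform four 8x8->16 carry-less multiplies;
 * the odd bytes of the inputs are ignored.
 */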
uint64_t clmul_8x4_even(uint64_t n, uint64_t m)
{
    n &= 0x00ff00ff00ff00ffull;
    m &= 0x00ff00ff00ff00ffull;
    return clmul_8x4_even_int(n, m);
}
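
/*
 * Perform four 8x8->16 carry-less multiplies;
 * the even bytes of the inputs are ignored.
 */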
uint64_t clmul_8x4_odd(uint64_t n, uint64_t m)
{
    return clmul_8x4_even(n >> 8, m >> 8);
}
static uint64_t unpack_8_to_16(uint64_t x)
{
    return  (x & 0x000000ff)
         | ((x & 0x0000ff00) << 8)
         | ((x & 0x00ff0000) << 16)
         | ((x & 0xff000000) << 24);
}
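
/*
 * Perform four 8x8->16 carry-less multiplies on bytes packed into
 * 32-bit inputs.
 */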
uint64_t clmul_8x4_packed(uint32_t n, uint32_t m)
{
    return clmul_8x4_even_int(unpack_8_to_16(n), unpack_8_to_16(m));
}
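
/*
 * Perform two 16x16->32 carry-less multiplies;
 * the odd 16-bit words of the inputs are ignored.
 */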
uint64_t clmul_16x2_even(uint64_t n, uint64_t m)
{
    uint64_t r = 0;

    n &= 0x0000ffff0000ffffull;
    m &= 0x0000ffff0000ffffull;

    for (int i = 0; i < 16; ++i) {
        /* Replicate bit 0 of each 32-bit lane of n across that lane. */
        uint64_t mask = (n & 0x0000000100000001ull) * 0xffffffffull;
        r ^= m & mask;
        n >>= 1;
        m <<= 1;
    }
    return r;
}
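
/*
 * Perform two 16x16->32 carry-less multiplies;
 * the even 16-bit words of the inputs are ignored.
 */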
uint64_t clmul_16x2_odd(uint64_t n, uint64_t m)
{
    return clmul_16x2_even(n >> 16, m >> 16);
}
uint64_t clmul_32(uint32_t n, uint32_t m32)
{
    uint64_t r = 0;
    uint64_t m = m32;

    for (int i = 0; i < 32; ++i) {
        r ^= n & 1 ? m : 0;
        n >>= 1;
        m <<= 1;
    }
    return r;
}
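
/*
 * Perform one 64x64->128 carry-less multiply, returning the result
 * as an Int128; this is the generic C implementation.
 */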
Int128 clmul_64_gen(uint64_t n, uint64_t m)
{
    uint64_t rl = 0, rh = 0;

    /* Bit 0 can only influence the low 64-bit result. */
    if (n & 1) {
        rl = m;
    }

    for (int i = 1; i < 64; ++i) {
        uint64_t mask = -((n >> i) & 1);
        rl ^= (m << i) & mask;
        rh ^= (m >> (64 - i)) & mask;
    }
    return int128_make128(rl, rh);
}