/*
 * VMAC: Message Authentication Code using Universal Hashing
 *
 * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
 *
 * Copyright (c) 2009, Intel Corporation.
 * Copyright (c) 2018, Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

/*
 * Derived from:
 *	VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
 *	This implementation is hereby placed in the public domain.
 *	The authors offer no warranty. Use at your own risk.
 *	Last modified: 17 APR 08, 1700 PDT
 */

#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

/*
 * User definable settings.
 */
#define VMAC_TAG_LEN	64
#define VMAC_KEY_SIZE	128	/* Must be 128, 192 or 256 */
#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
#define VMAC_NHBYTES	128	/* Must be 2^i for some 3 < i < 13; standard = 128 */
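
/*
 * With these settings the tag is 64 bits, the cipher key is 16 bytes
 * (e.g. AES-128 when instantiated as "vmac(aes)"), and the L1 NH hash
 * consumes the message in 128-byte blocks.
 */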

/* per-transform (per-key) context */
struct vmac_tfm_ctx {
	struct crypto_cipher *cipher;
	u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
	u64 polykey[2*VMAC_TAG_LEN/64];
	u64 l3key[2*VMAC_TAG_LEN/64];
};

/* per-request context */
struct vmac_desc_ctx {
	union {
		u8 partial[VMAC_NHBYTES];	/* partial block */
		__le64 partial_words[VMAC_NHBYTES / 8];
	};
	unsigned int partial_size;	/* size of the partial block */
	bool first_block_processed;
	u64 polytmp[2*VMAC_TAG_LEN/64];	/* running total of L2-hash */
};

#define UINT64_C(x) x##ULL
static const u64 p64   = UINT64_C(0xfffffffffffffeff);	/* 2^64 - 257 prime  */
static const u64 m62   = UINT64_C(0x3fffffffffffffff);	/* 62-bit mask       */
static const u64 m63   = UINT64_C(0x7fffffffffffffff);	/* 63-bit mask       */
static const u64 m64   = UINT64_C(0xffffffffffffffff);	/* 64-bit mask       */
static const u64 mpoly = UINT64_C(0x1fffffff1fffffff);	/* Poly key mask     */

#define pe64_to_cpup le64_to_cpup	/* Prefer little endian */

#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif

/*
 * The following routines are used in this implementation. They are
 * written via macros to simulate zero-overhead call-by-reference.
 *
 * MUL64: 64x64->128-bit multiplication
 * PMUL64: assumes top bits cleared on inputs
 * ADD128: 128x128->128-bit addition
 */
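
/*
 * The multiplies follow the schoolbook decomposition: writing
 * i1 = 2^32*a + b and i2 = 2^32*c + d,
 *
 *	i1*i2 = 2^64*(a*c) + 2^32*(a*d + b*c) + b*d
 *
 * MUL64 adds the two cross terms into the 128-bit result separately, so
 * no 64-bit intermediate can overflow.  PMUL64 first sums them into a
 * single 64-bit value 'm', which is only safe when the inputs' top bits
 * are clear (as they are for the mpoly-masked polynomial keys).
 */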

#define ADD128(rh, rl, ih, il)						\
	do {								\
		u64 _il = (il);						\
		(rl) += (_il);						\
		if ((rl) < (_il))					\
			(rh)++;						\
		(rh) += (ih);						\
	} while (0)

#define MUL32(i1, i2)	((u64)(u32)(i1)*(u32)(i2))

#define PMUL64(rh, rl, i1, i2)	/* Assumes m doesn't overflow */	\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2);	\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m >> 32), (m << 32));			\
	} while (0)

#define MUL64(rh, rl, i1, i2)						\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m1 = MUL32(_i1, _i2>>32);				\
		u64 m2 = MUL32(_i1>>32, _i2);				\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m1 >> 32), (m1 << 32));			\
		ADD128(rh, rl, (m2 >> 32), (m2 << 32));			\
	} while (0)

/*
 * For highest performance the L1 NH and L2 polynomial hashes should be
 * carefully implemented to take advantage of one's target architecture.
 * Here these two hash functions are defined multiple times; once for
 * 64-bit architectures, once for 32-bit SSE2 architectures, and once
 * for the rest of the (32-bit) architectures.
 * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
 * Optionally, nh_vmac_nhbytes can be defined (for multiples of
 * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
 * NH computations at once).
 */
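
/*
 * For reference (per the VMAC draft): interpreting the message as
 * little-endian 64-bit words m[i] and the NH key as words k[i],
 *
 *	NH = sum over even i of (m[i]+k[i]) * (m[i+1]+k[i+1])  mod 2^128
 *
 * with the inner additions taken modulo 2^64.  The macros below unroll
 * this loop to different depths.
 */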

#ifdef CONFIG_64BIT

#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1)				\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)

#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1)		\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
				pe64_to_cpup((mp)+i+3)+(kp)[i+5]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
				pe64_to_cpup((mp)+i+5)+(kp)[i+7]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
				pe64_to_cpup((mp)+i+7)+(kp)[i+9]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)
#endif

#define poly_step(ah, al, kh, kl, mh, ml)				\
	do {								\
		u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0;		\
		/* compute ab*cd, put bd into result registers */	\
		PMUL64(t3h, t3l, al, kh);				\
		PMUL64(t2h, t2l, ah, kl);				\
		PMUL64(t1h, t1l, ah, 2*kh);				\
		PMUL64(ah, al, al, kl);					\
		/* add 2 * ac to result */				\
		ADD128(ah, al, t1h, t1l);				\
		/* add together ad + bc */				\
		ADD128(t2h, t2l, t3h, t3l);				\
		/* now (ah,al), (t2l,2*t2h) need summing */		\
		/* first add the high registers, carrying into t2h */	\
		ADD128(t2h, ah, z, t2l);				\
		/* double t2h and add top bit of ah */			\
		t2h = 2 * t2h + (ah >> 63);				\
		ah &= m63;						\
		/* now add the low registers */				\
		ADD128(ah, al, mh, ml);					\
		ADD128(ah, al, z, t2h);					\
	} while (0)
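
/*
 * Each poly_step performs one Horner step of the L2 polynomial hash:
 * (ah,al) <- (ah,al)*(kh,kl) + (mh,ml), kept partially reduced modulo
 * the prime 2^127 - 1 (the "p127" referred to in l3hash below).  The
 * mpoly mask on the polynomial key keeps the PMUL64 inputs' top bits
 * clear, as that macro requires.
 */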

#else /* ! CONFIG_64BIT */

#ifndef nh_16
#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		u64 t1, t2, m1, m2, t;					\
		int i;							\
		rh = rl = t = 0;					\
		for (i = 0; i < nw; i += 2) {				\
			t1 = pe64_to_cpup(mp+i) + kp[i];		\
			t2 = pe64_to_cpup(mp+i+1) + kp[i+1];		\
			m2 = MUL32(t1 >> 32, t2);			\
			m1 = MUL32(t1, t2 >> 32);			\
			ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32),	\
				MUL32(t1, t2));				\
			rh += (u64)(u32)(m1 >> 32)			\
				+ (u32)(m2 >> 32);			\
			t += (u64)(u32)m1 + (u32)m2;			\
		}							\
		ADD128(rh, rl, (t >> 32), (t << 32));			\
	} while (0)
#endif

static void poly_step_func(u64 *ahi, u64 *alo,
			const u64 *kh, const u64 *kl,
			const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))

	u64 p, q, t;
	u32 t2;

	p = MUL32(a3, k3);
	p += p;
	p += *(u64 *)mh;
	p += MUL32(a0, k2);
	p += MUL32(a1, k1);
	p += MUL32(a2, k0);
	t = (u32)(p);
	p >>= 32;
	p += MUL32(a0, k3);
	p += MUL32(a1, k2);
	p += MUL32(a2, k1);
	p += MUL32(a3, k0);
	t |= ((u64)((u32)p & 0x7fffffff)) << 32;
	p >>= 31;
	p += (u64)(((u32 *)ml)[INDEX_LOW]);
	p += MUL32(a0, k0);
	q =  MUL32(a1, k3);
	q += MUL32(a2, k2);
	q += MUL32(a3, k1);
	q += q;
	p += q;
	t2 = (u32)(p);
	p >>= 32;
	p += (u64)(((u32 *)ml)[INDEX_HIGH]);
	p += MUL32(a0, k1);
	p += MUL32(a1, k0);
	q =  MUL32(a2, k3);
	q += MUL32(a3, k2);
	q += q;
	p += q;
	*(u64 *)(alo) = (p << 32) | t2;
	p >>= 32;
	*(u64 *)(ahi) = p + t;

#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}

#define poly_step(ah, al, kh, kl, mh, ml)				\
	poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))

#endif  /* end of specialized NH and poly definitions */

/* At least nh_16 is defined. Define others as needed here */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2)				\
	do {								\
		nh_16(mp, kp, nw, rh, rl);				\
		nh_16(mp, ((kp)+2), nw, rh2, rl2);			\
	} while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2)		\
	do {								\
		nh_vmac_nhbytes(mp, kp, nw, rh, rl);			\
		nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2);		\
	} while (0)
#endif
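
/*
 * L3 hash: the 128-bit L2 accumulator (p1,p2), adjusted by the bit
 * length of the final block, is fully reduced modulo 2^127 - 1, offset
 * by the key pair (k1,k2), and its two halves are multiplied modulo the
 * prime p64 = 2^64 - 257 to yield the 64-bit VHASH result.
 */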
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
	u64 rh, rl, t, z = 0;

	/* fully reduce (p1,p2)+(len,0) mod p127 */
	t = p1 >> 63;
	p1 &= m63;
	ADD128(p1, p2, len, t);
	/* At this point, (p1,p2) is at most 2^127+(len<<64) */
	t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
	ADD128(p1, p2, z, t);
	p1 &= m63;

	/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
	t = p1 + (p2 >> 32);
	t += (t >> 32);
	t += (u32)t > 0xfffffffeu;
	p1 += (t >> 32);
	p2 += (p1 << 32);

	/* compute (p1+k1)%p64 and (p2+k2)%p64 */
	p1 %= p64;
	p1 += k1;
	p1 += (0 - (p1 < k1)) & 257;
	p2 %= p64;
	p2 += k2;
	p2 += (0 - (p2 < k2)) & 257;

	/* compute (p1+k1)*(p2+k2)%p64 */
	MUL64(rh, rl, p1, p2);
	t = rh >> 56;
	ADD128(t, rl, z, rh);
	rh <<= 8;
	ADD128(t, rl, z, rh);
	t += t << 8;
	rl += t;
	rl += (0 - (rl < t)) & 257;
	rl += (0 - (rl > p64 - 1)) & 257;
	return rl;
}

/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
			 struct vmac_desc_ctx *dctx,
			 const __le64 *mptr, unsigned int blocks)
{
	const u64 *kptr = tctx->nhkey;
	const u64 pkh = tctx->polykey[0];
	const u64 pkl = tctx->polykey[1];
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];
	u64 rh, rl;

	if (!dctx->first_block_processed) {
		dctx->first_block_processed = true;
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		ADD128(ch, cl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
		blocks--;
	}

	while (blocks--) {
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
	}

	dctx->polytmp[0] = ch;
	dctx->polytmp[1] = cl;
}
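
/*
 * Key derivation: each VHASH subkey is generated by encrypting a series
 * of 16-byte counter blocks under the user-supplied cipher key, with a
 * distinct prefix byte domain-separating the NH, polynomial, and L3 key
 * streams.  Polynomial key words are masked with mpoly and L3 key words
 * are rejection-sampled below p64, so the field arithmetic above stays
 * in range.
 */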
static int vmac_setkey(struct crypto_shash *tfm,
		       const u8 *key, unsigned int keylen)
{
	struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
	__be64 out[2];
	u8 in[16] = { 0 };
	unsigned int i;
	int err;

	if (keylen != VMAC_KEY_LEN) {
		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	err = crypto_cipher_setkey(tctx->cipher, key, keylen);
	if (err)
		return err;

	/* Fill nh key */
	in[0] = 0x80;
	for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->nhkey[i] = be64_to_cpu(out[0]);
		tctx->nhkey[i+1] = be64_to_cpu(out[1]);
		in[15]++;
	}

	/* Fill poly key */
	in[0] = 0xC0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
		tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
		in[15]++;
	}

	/* Fill ip (l3) key */
	in[0] = 0xE0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
		do {
			crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
			tctx->l3key[i] = be64_to_cpu(out[0]);
			tctx->l3key[i+1] = be64_to_cpu(out[1]);
			in[15]++;
		} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
	}

	return 0;
}

static int vmac_init(struct shash_desc *desc)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);

	dctx->partial_size = 0;
	dctx->first_block_processed = false;
	memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
	return 0;
}

static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	unsigned int n;

	if (dctx->partial_size) {
		n = min(len, VMAC_NHBYTES - dctx->partial_size);
		memcpy(&dctx->partial[dctx->partial_size], p, n);
		dctx->partial_size += n;
		p += n;
		len -= n;
		if (dctx->partial_size == VMAC_NHBYTES) {
			vhash_blocks(tctx, dctx, dctx->partial_words, 1);
			dctx->partial_size = 0;
		}
	}

	if (len >= VMAC_NHBYTES) {
		n = round_down(len, VMAC_NHBYTES);
		/* TODO: 'p' may be misaligned here */
		vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
		p += n;
		len -= n;
	}

	if (len) {
		memcpy(dctx->partial, p, len);
		dctx->partial_size = len;
	}

	return 0;
}

static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
		       struct vmac_desc_ctx *dctx)
{
	unsigned int partial = dctx->partial_size;
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];

	/* L1 and L2-hash the final block if needed */
	if (partial) {
		/* Zero-pad to next 128-bit boundary */
		unsigned int n = round_up(partial, 16);
		u64 rh, rl;

		memset(&dctx->partial[partial], 0, n - partial);
		nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
		rh &= m62;
		if (dctx->first_block_processed)
			poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
				  rh, rl);
		else
			ADD128(ch, cl, rh, rl);
	}

	/* L3-hash the 128-bit output of L2-hash */
	return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}

static int vmac_final(struct shash_desc *desc, u8 *out)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	static const u8 nonce[16] = {}; /* TODO: this is insecure */
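	/*
	 * A fixed nonce makes the "pseudorandom" pad below identical for
	 * every message, whereas VMAC's security argument requires each
	 * (key, nonce) pair to be used for at most one message; hence
	 * the TODO above.
	 */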
	union {
		u8 bytes[16];
		__be64 pads[2];
	} block;
	unsigned int index;
	u64 hash, pad;

	/* Finish calculating the VHASH of the message */
	hash = vhash_final(tctx, dctx);

	/* Generate pseudorandom pad by encrypting the nonce */
	memcpy(&block, nonce, 16);
	index = block.bytes[15] & 1;
	block.bytes[15] &= ~1;
	crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes);
	pad = be64_to_cpu(block.pads[index]);

	/* The VMAC is the sum of VHASH and the pseudorandom pad */
	put_unaligned_le64(hash + pad, out);

	return 0;
}

static int vmac_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tctx->cipher = cipher;
	return 0;
}

static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(tctx->cipher);
}

static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct shash_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
	if (err)
		return err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = -EINVAL;
	if (alg->cra_blocksize != 16)
		goto out_put_alg;

	inst = shash_alloc_instance("vmac", alg);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	err = crypto_init_spawn(shash_instance_ctx(inst), alg,
				shash_crypto_instance(inst),
				CRYPTO_ALG_TYPE_MASK);
	if (err)
		goto out_free_inst;

	inst->alg.base.cra_priority = alg->cra_priority;
	inst->alg.base.cra_blocksize = alg->cra_blocksize;
	inst->alg.base.cra_alignmask = alg->cra_alignmask;

	inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
	inst->alg.base.cra_init = vmac_init_tfm;
	inst->alg.base.cra_exit = vmac_exit_tfm;

	inst->alg.descsize = sizeof(struct vmac_desc_ctx);
	inst->alg.digestsize = VMAC_TAG_LEN / 8;
	inst->alg.init = vmac_init;
	inst->alg.update = vmac_update;
	inst->alg.final = vmac_final;
	inst->alg.setkey = vmac_setkey;

	err = shash_register_instance(tmpl, inst);
	if (err) {
out_free_inst:
		shash_free_instance(shash_crypto_instance(inst));
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct crypto_template vmac_tmpl = {
	.name = "vmac",
	.create = vmac_create,
	.free = shash_free_instance,
	.module = THIS_MODULE,
};
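
/*
 * Usage sketch (hypothetical caller; 'key', 'msg' and 'msg_len' are
 * placeholders): once the template is registered, "vmac(aes)" can be
 * instantiated through the regular shash API, assuming an AES cipher
 * implementation is available:
 *
 *	struct crypto_shash *tfm;
 *	u8 mac[VMAC_TAG_LEN / 8];
 *
 *	tfm = crypto_alloc_shash("vmac(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	if (crypto_shash_setkey(tfm, key, VMAC_KEY_LEN) == 0) {
 *		SHASH_DESC_ON_STACK(desc, tfm);
 *
 *		desc->tfm = tfm;
 *		crypto_shash_digest(desc, msg, msg_len, mac);
 *	}
 *	crypto_free_shash(tfm);
 */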

static int __init vmac_module_init(void)
{
	return crypto_register_template(&vmac_tmpl);
}

static void __exit vmac_module_exit(void)
{
	crypto_unregister_template(&vmac_tmpl);
}

module_init(vmac_module_init);
module_exit(vmac_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
MODULE_ALIAS_CRYPTO("vmac");