2 * Support for Intel AES-NI instructions. This file contains glue
3 * code, the real AES implementation is in intel-aes_asm.S.
5 * Copyright (C) 2008, Intel Corp.
6 * Author: Huang Ying <ying.huang@intel.com>
8 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9 * interface for 64-bit kernels.
10 * Authors: Adrian Hoban <adrian.hoban@intel.com>
11 * Gabriele Paoloni <gabriele.paoloni@intel.com>
12 * Tadeusz Struk (tadeusz.struk@intel.com)
13 * Aidan O'Mahony (aidan.o.mahony@intel.com)
14 * Copyright (c) 2010, Intel Corporation.
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/crypto.h>
25 #include <linux/module.h>
26 #include <linux/err.h>
27 #include <crypto/algapi.h>
28 #include <crypto/aes.h>
29 #include <crypto/cryptd.h>
30 #include <crypto/ctr.h>
31 #include <asm/cpu_device_id.h>
34 #include <crypto/scatterwalk.h>
35 #include <crypto/internal/aead.h>
36 #include <linux/workqueue.h>
37 #include <linux/spinlock.h>
/* Convenience macros: set when the corresponding template cipher is
 * available (built-in or as a module), so the fpu()/rfc3686() wrapper
 * algorithms below can be registered.
 */
#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif
/* Per-tfm context for the async (cryptd-backed) ablkcipher wrappers. */
struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};
59 /* This data is stored at the end of the crypto_tfm struct.
60 * It's a type of per "session" data storage location.
61 * This needs to be 16 byte aligned.
63 struct aesni_rfc4106_gcm_ctx
{
65 struct crypto_aes_ctx aes_key_expanded
;
67 struct cryptd_aead
*cryptd_tfm
;
70 struct aesni_gcm_set_hash_subkey_result
{
72 struct completion completion
;
75 struct aesni_hash_subkey_req_data
{
77 struct aesni_gcm_set_hash_subkey_result result
;
78 struct scatterlist sg
;
81 #define AESNI_ALIGN (16)
82 #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
83 #define RFC4106_HASH_SUBKEY_SIZE 16
85 asmlinkage
int aesni_set_key(struct crypto_aes_ctx
*ctx
, const u8
*in_key
,
86 unsigned int key_len
);
87 asmlinkage
void aesni_enc(struct crypto_aes_ctx
*ctx
, u8
*out
,
89 asmlinkage
void aesni_dec(struct crypto_aes_ctx
*ctx
, u8
*out
,
91 asmlinkage
void aesni_ecb_enc(struct crypto_aes_ctx
*ctx
, u8
*out
,
92 const u8
*in
, unsigned int len
);
93 asmlinkage
void aesni_ecb_dec(struct crypto_aes_ctx
*ctx
, u8
*out
,
94 const u8
*in
, unsigned int len
);
95 asmlinkage
void aesni_cbc_enc(struct crypto_aes_ctx
*ctx
, u8
*out
,
96 const u8
*in
, unsigned int len
, u8
*iv
);
97 asmlinkage
void aesni_cbc_dec(struct crypto_aes_ctx
*ctx
, u8
*out
,
98 const u8
*in
, unsigned int len
, u8
*iv
);
100 int crypto_fpu_init(void);
101 void crypto_fpu_exit(void);
104 asmlinkage
void aesni_ctr_enc(struct crypto_aes_ctx
*ctx
, u8
*out
,
105 const u8
*in
, unsigned int len
, u8
*iv
);
107 /* asmlinkage void aesni_gcm_enc()
108 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
109 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
110 * const u8 *in, Plaintext input
111 * unsigned long plaintext_len, Length of data in bytes for encryption.
112 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
113 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
114 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
115 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
116 * const u8 *aad, Additional Authentication Data (AAD)
117 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
118 * is going to be 8 or 12 bytes
119 * u8 *auth_tag, Authenticated Tag output.
120 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
121 * Valid values are 16 (most likely), 12 or 8.
123 asmlinkage
void aesni_gcm_enc(void *ctx
, u8
*out
,
124 const u8
*in
, unsigned long plaintext_len
, u8
*iv
,
125 u8
*hash_subkey
, const u8
*aad
, unsigned long aad_len
,
126 u8
*auth_tag
, unsigned long auth_tag_len
);
128 /* asmlinkage void aesni_gcm_dec()
129 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
130 * u8 *out, Plaintext output. Decrypt in-place is allowed.
131 * const u8 *in, Ciphertext input
132 * unsigned long ciphertext_len, Length of data in bytes for decryption.
133 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
134 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
135 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
136 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
137 * const u8 *aad, Additional Authentication Data (AAD)
138 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
139 * to be 8 or 12 bytes
140 * u8 *auth_tag, Authenticated Tag output.
141 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
142 * Valid values are 16 (most likely), 12 or 8.
144 asmlinkage
void aesni_gcm_dec(void *ctx
, u8
*out
,
145 const u8
*in
, unsigned long ciphertext_len
, u8
*iv
,
146 u8
*hash_subkey
, const u8
*aad
, unsigned long aad_len
,
147 u8
*auth_tag
, unsigned long auth_tag_len
);
150 aesni_rfc4106_gcm_ctx
*aesni_rfc4106_gcm_ctx_get(struct crypto_aead
*tfm
)
153 (struct aesni_rfc4106_gcm_ctx
*)
155 crypto_tfm_ctx(crypto_aead_tfm(tfm
)), AESNI_ALIGN
);
159 static inline struct crypto_aes_ctx
*aes_ctx(void *raw_ctx
)
161 unsigned long addr
= (unsigned long)raw_ctx
;
162 unsigned long align
= AESNI_ALIGN
;
164 if (align
<= crypto_tfm_ctx_alignment())
166 return (struct crypto_aes_ctx
*)ALIGN(addr
, align
);
169 static int aes_set_key_common(struct crypto_tfm
*tfm
, void *raw_ctx
,
170 const u8
*in_key
, unsigned int key_len
)
172 struct crypto_aes_ctx
*ctx
= aes_ctx(raw_ctx
);
173 u32
*flags
= &tfm
->crt_flags
;
176 if (key_len
!= AES_KEYSIZE_128
&& key_len
!= AES_KEYSIZE_192
&&
177 key_len
!= AES_KEYSIZE_256
) {
178 *flags
|= CRYPTO_TFM_RES_BAD_KEY_LEN
;
182 if (!irq_fpu_usable())
183 err
= crypto_aes_expand_key(ctx
, in_key
, key_len
);
186 err
= aesni_set_key(ctx
, in_key
, key_len
);
193 static int aes_set_key(struct crypto_tfm
*tfm
, const u8
*in_key
,
194 unsigned int key_len
)
196 return aes_set_key_common(tfm
, crypto_tfm_ctx(tfm
), in_key
, key_len
);
199 static void aes_encrypt(struct crypto_tfm
*tfm
, u8
*dst
, const u8
*src
)
201 struct crypto_aes_ctx
*ctx
= aes_ctx(crypto_tfm_ctx(tfm
));
203 if (!irq_fpu_usable())
204 crypto_aes_encrypt_x86(ctx
, dst
, src
);
207 aesni_enc(ctx
, dst
, src
);
212 static void aes_decrypt(struct crypto_tfm
*tfm
, u8
*dst
, const u8
*src
)
214 struct crypto_aes_ctx
*ctx
= aes_ctx(crypto_tfm_ctx(tfm
));
216 if (!irq_fpu_usable())
217 crypto_aes_decrypt_x86(ctx
, dst
, src
);
220 aesni_dec(ctx
, dst
, src
);
225 static void __aes_encrypt(struct crypto_tfm
*tfm
, u8
*dst
, const u8
*src
)
227 struct crypto_aes_ctx
*ctx
= aes_ctx(crypto_tfm_ctx(tfm
));
229 aesni_enc(ctx
, dst
, src
);
232 static void __aes_decrypt(struct crypto_tfm
*tfm
, u8
*dst
, const u8
*src
)
234 struct crypto_aes_ctx
*ctx
= aes_ctx(crypto_tfm_ctx(tfm
));
236 aesni_dec(ctx
, dst
, src
);
239 static int ecb_encrypt(struct blkcipher_desc
*desc
,
240 struct scatterlist
*dst
, struct scatterlist
*src
,
243 struct crypto_aes_ctx
*ctx
= aes_ctx(crypto_blkcipher_ctx(desc
->tfm
));
244 struct blkcipher_walk walk
;
247 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
248 err
= blkcipher_walk_virt(desc
, &walk
);
249 desc
->flags
&= ~CRYPTO_TFM_REQ_MAY_SLEEP
;
252 while ((nbytes
= walk
.nbytes
)) {
253 aesni_ecb_enc(ctx
, walk
.dst
.virt
.addr
, walk
.src
.virt
.addr
,
254 nbytes
& AES_BLOCK_MASK
);
255 nbytes
&= AES_BLOCK_SIZE
- 1;
256 err
= blkcipher_walk_done(desc
, &walk
, nbytes
);
263 static int ecb_decrypt(struct blkcipher_desc
*desc
,
264 struct scatterlist
*dst
, struct scatterlist
*src
,
267 struct crypto_aes_ctx
*ctx
= aes_ctx(crypto_blkcipher_ctx(desc
->tfm
));
268 struct blkcipher_walk walk
;
271 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
272 err
= blkcipher_walk_virt(desc
, &walk
);
273 desc
->flags
&= ~CRYPTO_TFM_REQ_MAY_SLEEP
;
276 while ((nbytes
= walk
.nbytes
)) {
277 aesni_ecb_dec(ctx
, walk
.dst
.virt
.addr
, walk
.src
.virt
.addr
,
278 nbytes
& AES_BLOCK_MASK
);
279 nbytes
&= AES_BLOCK_SIZE
- 1;
280 err
= blkcipher_walk_done(desc
, &walk
, nbytes
);
287 static int cbc_encrypt(struct blkcipher_desc
*desc
,
288 struct scatterlist
*dst
, struct scatterlist
*src
,
291 struct crypto_aes_ctx
*ctx
= aes_ctx(crypto_blkcipher_ctx(desc
->tfm
));
292 struct blkcipher_walk walk
;
295 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
296 err
= blkcipher_walk_virt(desc
, &walk
);
297 desc
->flags
&= ~CRYPTO_TFM_REQ_MAY_SLEEP
;
300 while ((nbytes
= walk
.nbytes
)) {
301 aesni_cbc_enc(ctx
, walk
.dst
.virt
.addr
, walk
.src
.virt
.addr
,
302 nbytes
& AES_BLOCK_MASK
, walk
.iv
);
303 nbytes
&= AES_BLOCK_SIZE
- 1;
304 err
= blkcipher_walk_done(desc
, &walk
, nbytes
);
311 static int cbc_decrypt(struct blkcipher_desc
*desc
,
312 struct scatterlist
*dst
, struct scatterlist
*src
,
315 struct crypto_aes_ctx
*ctx
= aes_ctx(crypto_blkcipher_ctx(desc
->tfm
));
316 struct blkcipher_walk walk
;
319 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
320 err
= blkcipher_walk_virt(desc
, &walk
);
321 desc
->flags
&= ~CRYPTO_TFM_REQ_MAY_SLEEP
;
324 while ((nbytes
= walk
.nbytes
)) {
325 aesni_cbc_dec(ctx
, walk
.dst
.virt
.addr
, walk
.src
.virt
.addr
,
326 nbytes
& AES_BLOCK_MASK
, walk
.iv
);
327 nbytes
&= AES_BLOCK_SIZE
- 1;
328 err
= blkcipher_walk_done(desc
, &walk
, nbytes
);
336 static void ctr_crypt_final(struct crypto_aes_ctx
*ctx
,
337 struct blkcipher_walk
*walk
)
339 u8
*ctrblk
= walk
->iv
;
340 u8 keystream
[AES_BLOCK_SIZE
];
341 u8
*src
= walk
->src
.virt
.addr
;
342 u8
*dst
= walk
->dst
.virt
.addr
;
343 unsigned int nbytes
= walk
->nbytes
;
345 aesni_enc(ctx
, keystream
, ctrblk
);
346 crypto_xor(keystream
, src
, nbytes
);
347 memcpy(dst
, keystream
, nbytes
);
348 crypto_inc(ctrblk
, AES_BLOCK_SIZE
);
351 static int ctr_crypt(struct blkcipher_desc
*desc
,
352 struct scatterlist
*dst
, struct scatterlist
*src
,
355 struct crypto_aes_ctx
*ctx
= aes_ctx(crypto_blkcipher_ctx(desc
->tfm
));
356 struct blkcipher_walk walk
;
359 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
360 err
= blkcipher_walk_virt_block(desc
, &walk
, AES_BLOCK_SIZE
);
361 desc
->flags
&= ~CRYPTO_TFM_REQ_MAY_SLEEP
;
364 while ((nbytes
= walk
.nbytes
) >= AES_BLOCK_SIZE
) {
365 aesni_ctr_enc(ctx
, walk
.dst
.virt
.addr
, walk
.src
.virt
.addr
,
366 nbytes
& AES_BLOCK_MASK
, walk
.iv
);
367 nbytes
&= AES_BLOCK_SIZE
- 1;
368 err
= blkcipher_walk_done(desc
, &walk
, nbytes
);
371 ctr_crypt_final(ctx
, &walk
);
372 err
= blkcipher_walk_done(desc
, &walk
, 0);
380 static int ablk_set_key(struct crypto_ablkcipher
*tfm
, const u8
*key
,
381 unsigned int key_len
)
383 struct async_aes_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
384 struct crypto_ablkcipher
*child
= &ctx
->cryptd_tfm
->base
;
387 crypto_ablkcipher_clear_flags(child
, CRYPTO_TFM_REQ_MASK
);
388 crypto_ablkcipher_set_flags(child
, crypto_ablkcipher_get_flags(tfm
)
389 & CRYPTO_TFM_REQ_MASK
);
390 err
= crypto_ablkcipher_setkey(child
, key
, key_len
);
391 crypto_ablkcipher_set_flags(tfm
, crypto_ablkcipher_get_flags(child
)
392 & CRYPTO_TFM_RES_MASK
);
396 static int ablk_encrypt(struct ablkcipher_request
*req
)
398 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
399 struct async_aes_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
401 if (!irq_fpu_usable()) {
402 struct ablkcipher_request
*cryptd_req
=
403 ablkcipher_request_ctx(req
);
404 memcpy(cryptd_req
, req
, sizeof(*req
));
405 ablkcipher_request_set_tfm(cryptd_req
, &ctx
->cryptd_tfm
->base
);
406 return crypto_ablkcipher_encrypt(cryptd_req
);
408 struct blkcipher_desc desc
;
409 desc
.tfm
= cryptd_ablkcipher_child(ctx
->cryptd_tfm
);
410 desc
.info
= req
->info
;
412 return crypto_blkcipher_crt(desc
.tfm
)->encrypt(
413 &desc
, req
->dst
, req
->src
, req
->nbytes
);
417 static int ablk_decrypt(struct ablkcipher_request
*req
)
419 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
420 struct async_aes_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
422 if (!irq_fpu_usable()) {
423 struct ablkcipher_request
*cryptd_req
=
424 ablkcipher_request_ctx(req
);
425 memcpy(cryptd_req
, req
, sizeof(*req
));
426 ablkcipher_request_set_tfm(cryptd_req
, &ctx
->cryptd_tfm
->base
);
427 return crypto_ablkcipher_decrypt(cryptd_req
);
429 struct blkcipher_desc desc
;
430 desc
.tfm
= cryptd_ablkcipher_child(ctx
->cryptd_tfm
);
431 desc
.info
= req
->info
;
433 return crypto_blkcipher_crt(desc
.tfm
)->decrypt(
434 &desc
, req
->dst
, req
->src
, req
->nbytes
);
438 static void ablk_exit(struct crypto_tfm
*tfm
)
440 struct async_aes_ctx
*ctx
= crypto_tfm_ctx(tfm
);
442 cryptd_free_ablkcipher(ctx
->cryptd_tfm
);
445 static int ablk_init_common(struct crypto_tfm
*tfm
, const char *drv_name
)
447 struct async_aes_ctx
*ctx
= crypto_tfm_ctx(tfm
);
448 struct cryptd_ablkcipher
*cryptd_tfm
;
450 cryptd_tfm
= cryptd_alloc_ablkcipher(drv_name
, 0, 0);
451 if (IS_ERR(cryptd_tfm
))
452 return PTR_ERR(cryptd_tfm
);
454 ctx
->cryptd_tfm
= cryptd_tfm
;
455 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct ablkcipher_request
) +
456 crypto_ablkcipher_reqsize(&cryptd_tfm
->base
);
/* Per-mode cra_init hooks: each binds the async wrapper to the matching
 * internal (FPU-protected) driver implementation.
 */
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}

#ifdef HAS_CTR
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "rfc3686(__driver-ctr-aes-aesni)");
}
#endif

#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(lrw(__driver-aes-aesni))");
}
#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(xts(__driver-aes-aesni))");
}
#endif
507 static int rfc4106_init(struct crypto_tfm
*tfm
)
509 struct cryptd_aead
*cryptd_tfm
;
510 struct aesni_rfc4106_gcm_ctx
*ctx
= (struct aesni_rfc4106_gcm_ctx
*)
511 PTR_ALIGN((u8
*)crypto_tfm_ctx(tfm
), AESNI_ALIGN
);
512 struct crypto_aead
*cryptd_child
;
513 struct aesni_rfc4106_gcm_ctx
*child_ctx
;
514 cryptd_tfm
= cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
515 if (IS_ERR(cryptd_tfm
))
516 return PTR_ERR(cryptd_tfm
);
518 cryptd_child
= cryptd_aead_child(cryptd_tfm
);
519 child_ctx
= aesni_rfc4106_gcm_ctx_get(cryptd_child
);
520 memcpy(child_ctx
, ctx
, sizeof(*ctx
));
521 ctx
->cryptd_tfm
= cryptd_tfm
;
522 tfm
->crt_aead
.reqsize
= sizeof(struct aead_request
)
523 + crypto_aead_reqsize(&cryptd_tfm
->base
);
527 static void rfc4106_exit(struct crypto_tfm
*tfm
)
529 struct aesni_rfc4106_gcm_ctx
*ctx
=
530 (struct aesni_rfc4106_gcm_ctx
*)
531 PTR_ALIGN((u8
*)crypto_tfm_ctx(tfm
), AESNI_ALIGN
);
532 if (!IS_ERR(ctx
->cryptd_tfm
))
533 cryptd_free_aead(ctx
->cryptd_tfm
);
538 rfc4106_set_hash_subkey_done(struct crypto_async_request
*req
, int err
)
540 struct aesni_gcm_set_hash_subkey_result
*result
= req
->data
;
542 if (err
== -EINPROGRESS
)
545 complete(&result
->completion
);
549 rfc4106_set_hash_subkey(u8
*hash_subkey
, const u8
*key
, unsigned int key_len
)
551 struct crypto_ablkcipher
*ctr_tfm
;
552 struct ablkcipher_request
*req
;
554 struct aesni_hash_subkey_req_data
*req_data
;
556 ctr_tfm
= crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
558 return PTR_ERR(ctr_tfm
);
560 crypto_ablkcipher_clear_flags(ctr_tfm
, ~0);
562 ret
= crypto_ablkcipher_setkey(ctr_tfm
, key
, key_len
);
564 goto out_free_ablkcipher
;
567 req
= ablkcipher_request_alloc(ctr_tfm
, GFP_KERNEL
);
569 goto out_free_ablkcipher
;
571 req_data
= kmalloc(sizeof(*req_data
), GFP_KERNEL
);
573 goto out_free_request
;
575 memset(req_data
->iv
, 0, sizeof(req_data
->iv
));
577 /* Clear the data in the hash sub key container to zero.*/
578 /* We want to cipher all zeros to create the hash sub key. */
579 memset(hash_subkey
, 0, RFC4106_HASH_SUBKEY_SIZE
);
581 init_completion(&req_data
->result
.completion
);
582 sg_init_one(&req_data
->sg
, hash_subkey
, RFC4106_HASH_SUBKEY_SIZE
);
583 ablkcipher_request_set_tfm(req
, ctr_tfm
);
584 ablkcipher_request_set_callback(req
, CRYPTO_TFM_REQ_MAY_SLEEP
|
585 CRYPTO_TFM_REQ_MAY_BACKLOG
,
586 rfc4106_set_hash_subkey_done
,
589 ablkcipher_request_set_crypt(req
, &req_data
->sg
,
590 &req_data
->sg
, RFC4106_HASH_SUBKEY_SIZE
, req_data
->iv
);
592 ret
= crypto_ablkcipher_encrypt(req
);
593 if (ret
== -EINPROGRESS
|| ret
== -EBUSY
) {
594 ret
= wait_for_completion_interruptible
595 (&req_data
->result
.completion
);
597 ret
= req_data
->result
.err
;
601 ablkcipher_request_free(req
);
603 crypto_free_ablkcipher(ctr_tfm
);
607 static int rfc4106_set_key(struct crypto_aead
*parent
, const u8
*key
,
608 unsigned int key_len
)
611 struct crypto_tfm
*tfm
= crypto_aead_tfm(parent
);
612 struct aesni_rfc4106_gcm_ctx
*ctx
= aesni_rfc4106_gcm_ctx_get(parent
);
613 struct crypto_aead
*cryptd_child
= cryptd_aead_child(ctx
->cryptd_tfm
);
614 struct aesni_rfc4106_gcm_ctx
*child_ctx
=
615 aesni_rfc4106_gcm_ctx_get(cryptd_child
);
616 u8
*new_key_mem
= NULL
;
619 crypto_tfm_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
622 /*Account for 4 byte nonce at the end.*/
624 if (key_len
!= AES_KEYSIZE_128
) {
625 crypto_tfm_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
629 memcpy(ctx
->nonce
, key
+ key_len
, sizeof(ctx
->nonce
));
630 /*This must be on a 16 byte boundary!*/
631 if ((unsigned long)(&(ctx
->aes_key_expanded
.key_enc
[0])) % AESNI_ALIGN
)
634 if ((unsigned long)key
% AESNI_ALIGN
) {
635 /*key is not aligned: use an auxuliar aligned pointer*/
636 new_key_mem
= kmalloc(key_len
+AESNI_ALIGN
, GFP_KERNEL
);
640 new_key_mem
= PTR_ALIGN(new_key_mem
, AESNI_ALIGN
);
641 memcpy(new_key_mem
, key
, key_len
);
645 if (!irq_fpu_usable())
646 ret
= crypto_aes_expand_key(&(ctx
->aes_key_expanded
),
650 ret
= aesni_set_key(&(ctx
->aes_key_expanded
), key
, key_len
);
653 /*This must be on a 16 byte boundary!*/
654 if ((unsigned long)(&(ctx
->hash_subkey
[0])) % AESNI_ALIGN
) {
658 ret
= rfc4106_set_hash_subkey(ctx
->hash_subkey
, key
, key_len
);
659 memcpy(child_ctx
, ctx
, sizeof(*ctx
));
665 /* This is the Integrity Check Value (aka the authentication tag length and can
666 * be 8, 12 or 16 bytes long. */
667 static int rfc4106_set_authsize(struct crypto_aead
*parent
,
668 unsigned int authsize
)
670 struct aesni_rfc4106_gcm_ctx
*ctx
= aesni_rfc4106_gcm_ctx_get(parent
);
671 struct crypto_aead
*cryptd_child
= cryptd_aead_child(ctx
->cryptd_tfm
);
681 crypto_aead_crt(parent
)->authsize
= authsize
;
682 crypto_aead_crt(cryptd_child
)->authsize
= authsize
;
686 static int rfc4106_encrypt(struct aead_request
*req
)
689 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
690 struct aesni_rfc4106_gcm_ctx
*ctx
= aesni_rfc4106_gcm_ctx_get(tfm
);
692 if (!irq_fpu_usable()) {
693 struct aead_request
*cryptd_req
=
694 (struct aead_request
*) aead_request_ctx(req
);
695 memcpy(cryptd_req
, req
, sizeof(*req
));
696 aead_request_set_tfm(cryptd_req
, &ctx
->cryptd_tfm
->base
);
697 return crypto_aead_encrypt(cryptd_req
);
699 struct crypto_aead
*cryptd_child
= cryptd_aead_child(ctx
->cryptd_tfm
);
701 ret
= cryptd_child
->base
.crt_aead
.encrypt(req
);
707 static int rfc4106_decrypt(struct aead_request
*req
)
710 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
711 struct aesni_rfc4106_gcm_ctx
*ctx
= aesni_rfc4106_gcm_ctx_get(tfm
);
713 if (!irq_fpu_usable()) {
714 struct aead_request
*cryptd_req
=
715 (struct aead_request
*) aead_request_ctx(req
);
716 memcpy(cryptd_req
, req
, sizeof(*req
));
717 aead_request_set_tfm(cryptd_req
, &ctx
->cryptd_tfm
->base
);
718 return crypto_aead_decrypt(cryptd_req
);
720 struct crypto_aead
*cryptd_child
= cryptd_aead_child(ctx
->cryptd_tfm
);
722 ret
= cryptd_child
->base
.crt_aead
.decrypt(req
);
728 static int __driver_rfc4106_encrypt(struct aead_request
*req
)
730 u8 one_entry_in_sg
= 0;
731 u8
*src
, *dst
, *assoc
;
732 __be32 counter
= cpu_to_be32(1);
733 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
734 struct aesni_rfc4106_gcm_ctx
*ctx
= aesni_rfc4106_gcm_ctx_get(tfm
);
735 void *aes_ctx
= &(ctx
->aes_key_expanded
);
736 unsigned long auth_tag_len
= crypto_aead_authsize(tfm
);
737 u8 iv_tab
[16+AESNI_ALIGN
];
738 u8
* iv
= (u8
*) PTR_ALIGN((u8
*)iv_tab
, AESNI_ALIGN
);
739 struct scatter_walk src_sg_walk
;
740 struct scatter_walk assoc_sg_walk
;
741 struct scatter_walk dst_sg_walk
;
744 /* Assuming we are supporting rfc4106 64-bit extended */
745 /* sequence numbers We need to have the AAD length equal */
746 /* to 8 or 12 bytes */
747 if (unlikely(req
->assoclen
!= 8 && req
->assoclen
!= 12))
750 for (i
= 0; i
< 4; i
++)
751 *(iv
+i
) = ctx
->nonce
[i
];
752 for (i
= 0; i
< 8; i
++)
753 *(iv
+4+i
) = req
->iv
[i
];
754 *((__be32
*)(iv
+12)) = counter
;
756 if ((sg_is_last(req
->src
)) && (sg_is_last(req
->assoc
))) {
758 scatterwalk_start(&src_sg_walk
, req
->src
);
759 scatterwalk_start(&assoc_sg_walk
, req
->assoc
);
760 src
= scatterwalk_map(&src_sg_walk
);
761 assoc
= scatterwalk_map(&assoc_sg_walk
);
763 if (unlikely(req
->src
!= req
->dst
)) {
764 scatterwalk_start(&dst_sg_walk
, req
->dst
);
765 dst
= scatterwalk_map(&dst_sg_walk
);
769 /* Allocate memory for src, dst, assoc */
770 src
= kmalloc(req
->cryptlen
+ auth_tag_len
+ req
->assoclen
,
774 assoc
= (src
+ req
->cryptlen
+ auth_tag_len
);
775 scatterwalk_map_and_copy(src
, req
->src
, 0, req
->cryptlen
, 0);
776 scatterwalk_map_and_copy(assoc
, req
->assoc
, 0,
781 aesni_gcm_enc(aes_ctx
, dst
, src
, (unsigned long)req
->cryptlen
, iv
,
782 ctx
->hash_subkey
, assoc
, (unsigned long)req
->assoclen
, dst
783 + ((unsigned long)req
->cryptlen
), auth_tag_len
);
785 /* The authTag (aka the Integrity Check Value) needs to be written
786 * back to the packet. */
787 if (one_entry_in_sg
) {
788 if (unlikely(req
->src
!= req
->dst
)) {
789 scatterwalk_unmap(dst
);
790 scatterwalk_done(&dst_sg_walk
, 0, 0);
792 scatterwalk_unmap(src
);
793 scatterwalk_unmap(assoc
);
794 scatterwalk_done(&src_sg_walk
, 0, 0);
795 scatterwalk_done(&assoc_sg_walk
, 0, 0);
797 scatterwalk_map_and_copy(dst
, req
->dst
, 0,
798 req
->cryptlen
+ auth_tag_len
, 1);
804 static int __driver_rfc4106_decrypt(struct aead_request
*req
)
806 u8 one_entry_in_sg
= 0;
807 u8
*src
, *dst
, *assoc
;
808 unsigned long tempCipherLen
= 0;
809 __be32 counter
= cpu_to_be32(1);
811 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
812 struct aesni_rfc4106_gcm_ctx
*ctx
= aesni_rfc4106_gcm_ctx_get(tfm
);
813 void *aes_ctx
= &(ctx
->aes_key_expanded
);
814 unsigned long auth_tag_len
= crypto_aead_authsize(tfm
);
815 u8 iv_and_authTag
[32+AESNI_ALIGN
];
816 u8
*iv
= (u8
*) PTR_ALIGN((u8
*)iv_and_authTag
, AESNI_ALIGN
);
817 u8
*authTag
= iv
+ 16;
818 struct scatter_walk src_sg_walk
;
819 struct scatter_walk assoc_sg_walk
;
820 struct scatter_walk dst_sg_walk
;
823 if (unlikely((req
->cryptlen
< auth_tag_len
) ||
824 (req
->assoclen
!= 8 && req
->assoclen
!= 12)))
826 /* Assuming we are supporting rfc4106 64-bit extended */
827 /* sequence numbers We need to have the AAD length */
828 /* equal to 8 or 12 bytes */
830 tempCipherLen
= (unsigned long)(req
->cryptlen
- auth_tag_len
);
832 for (i
= 0; i
< 4; i
++)
833 *(iv
+i
) = ctx
->nonce
[i
];
834 for (i
= 0; i
< 8; i
++)
835 *(iv
+4+i
) = req
->iv
[i
];
836 *((__be32
*)(iv
+12)) = counter
;
838 if ((sg_is_last(req
->src
)) && (sg_is_last(req
->assoc
))) {
840 scatterwalk_start(&src_sg_walk
, req
->src
);
841 scatterwalk_start(&assoc_sg_walk
, req
->assoc
);
842 src
= scatterwalk_map(&src_sg_walk
);
843 assoc
= scatterwalk_map(&assoc_sg_walk
);
845 if (unlikely(req
->src
!= req
->dst
)) {
846 scatterwalk_start(&dst_sg_walk
, req
->dst
);
847 dst
= scatterwalk_map(&dst_sg_walk
);
851 /* Allocate memory for src, dst, assoc */
852 src
= kmalloc(req
->cryptlen
+ req
->assoclen
, GFP_ATOMIC
);
855 assoc
= (src
+ req
->cryptlen
+ auth_tag_len
);
856 scatterwalk_map_and_copy(src
, req
->src
, 0, req
->cryptlen
, 0);
857 scatterwalk_map_and_copy(assoc
, req
->assoc
, 0,
862 aesni_gcm_dec(aes_ctx
, dst
, src
, tempCipherLen
, iv
,
863 ctx
->hash_subkey
, assoc
, (unsigned long)req
->assoclen
,
864 authTag
, auth_tag_len
);
866 /* Compare generated tag with passed in tag. */
867 retval
= memcmp(src
+ tempCipherLen
, authTag
, auth_tag_len
) ?
870 if (one_entry_in_sg
) {
871 if (unlikely(req
->src
!= req
->dst
)) {
872 scatterwalk_unmap(dst
);
873 scatterwalk_done(&dst_sg_walk
, 0, 0);
875 scatterwalk_unmap(src
);
876 scatterwalk_unmap(assoc
);
877 scatterwalk_done(&src_sg_walk
, 0, 0);
878 scatterwalk_done(&assoc_sg_walk
, 0, 0);
880 scatterwalk_map_and_copy(dst
, req
->dst
, 0, req
->cryptlen
, 1);
887 static struct crypto_alg aesni_algs
[] = { {
889 .cra_driver_name
= "aes-aesni",
891 .cra_flags
= CRYPTO_ALG_TYPE_CIPHER
,
892 .cra_blocksize
= AES_BLOCK_SIZE
,
893 .cra_ctxsize
= sizeof(struct crypto_aes_ctx
) +
896 .cra_module
= THIS_MODULE
,
899 .cia_min_keysize
= AES_MIN_KEY_SIZE
,
900 .cia_max_keysize
= AES_MAX_KEY_SIZE
,
901 .cia_setkey
= aes_set_key
,
902 .cia_encrypt
= aes_encrypt
,
903 .cia_decrypt
= aes_decrypt
907 .cra_name
= "__aes-aesni",
908 .cra_driver_name
= "__driver-aes-aesni",
910 .cra_flags
= CRYPTO_ALG_TYPE_CIPHER
,
911 .cra_blocksize
= AES_BLOCK_SIZE
,
912 .cra_ctxsize
= sizeof(struct crypto_aes_ctx
) +
915 .cra_module
= THIS_MODULE
,
918 .cia_min_keysize
= AES_MIN_KEY_SIZE
,
919 .cia_max_keysize
= AES_MAX_KEY_SIZE
,
920 .cia_setkey
= aes_set_key
,
921 .cia_encrypt
= __aes_encrypt
,
922 .cia_decrypt
= __aes_decrypt
926 .cra_name
= "__ecb-aes-aesni",
927 .cra_driver_name
= "__driver-ecb-aes-aesni",
929 .cra_flags
= CRYPTO_ALG_TYPE_BLKCIPHER
,
930 .cra_blocksize
= AES_BLOCK_SIZE
,
931 .cra_ctxsize
= sizeof(struct crypto_aes_ctx
) +
934 .cra_type
= &crypto_blkcipher_type
,
935 .cra_module
= THIS_MODULE
,
938 .min_keysize
= AES_MIN_KEY_SIZE
,
939 .max_keysize
= AES_MAX_KEY_SIZE
,
940 .setkey
= aes_set_key
,
941 .encrypt
= ecb_encrypt
,
942 .decrypt
= ecb_decrypt
,
946 .cra_name
= "__cbc-aes-aesni",
947 .cra_driver_name
= "__driver-cbc-aes-aesni",
949 .cra_flags
= CRYPTO_ALG_TYPE_BLKCIPHER
,
950 .cra_blocksize
= AES_BLOCK_SIZE
,
951 .cra_ctxsize
= sizeof(struct crypto_aes_ctx
) +
954 .cra_type
= &crypto_blkcipher_type
,
955 .cra_module
= THIS_MODULE
,
958 .min_keysize
= AES_MIN_KEY_SIZE
,
959 .max_keysize
= AES_MAX_KEY_SIZE
,
960 .setkey
= aes_set_key
,
961 .encrypt
= cbc_encrypt
,
962 .decrypt
= cbc_decrypt
,
966 .cra_name
= "ecb(aes)",
967 .cra_driver_name
= "ecb-aes-aesni",
969 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
970 .cra_blocksize
= AES_BLOCK_SIZE
,
971 .cra_ctxsize
= sizeof(struct async_aes_ctx
),
973 .cra_type
= &crypto_ablkcipher_type
,
974 .cra_module
= THIS_MODULE
,
975 .cra_init
= ablk_ecb_init
,
976 .cra_exit
= ablk_exit
,
979 .min_keysize
= AES_MIN_KEY_SIZE
,
980 .max_keysize
= AES_MAX_KEY_SIZE
,
981 .setkey
= ablk_set_key
,
982 .encrypt
= ablk_encrypt
,
983 .decrypt
= ablk_decrypt
,
987 .cra_name
= "cbc(aes)",
988 .cra_driver_name
= "cbc-aes-aesni",
990 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
991 .cra_blocksize
= AES_BLOCK_SIZE
,
992 .cra_ctxsize
= sizeof(struct async_aes_ctx
),
994 .cra_type
= &crypto_ablkcipher_type
,
995 .cra_module
= THIS_MODULE
,
996 .cra_init
= ablk_cbc_init
,
997 .cra_exit
= ablk_exit
,
1000 .min_keysize
= AES_MIN_KEY_SIZE
,
1001 .max_keysize
= AES_MAX_KEY_SIZE
,
1002 .ivsize
= AES_BLOCK_SIZE
,
1003 .setkey
= ablk_set_key
,
1004 .encrypt
= ablk_encrypt
,
1005 .decrypt
= ablk_decrypt
,
1008 #ifdef CONFIG_X86_64
1010 .cra_name
= "__ctr-aes-aesni",
1011 .cra_driver_name
= "__driver-ctr-aes-aesni",
1013 .cra_flags
= CRYPTO_ALG_TYPE_BLKCIPHER
,
1015 .cra_ctxsize
= sizeof(struct crypto_aes_ctx
) +
1018 .cra_type
= &crypto_blkcipher_type
,
1019 .cra_module
= THIS_MODULE
,
1022 .min_keysize
= AES_MIN_KEY_SIZE
,
1023 .max_keysize
= AES_MAX_KEY_SIZE
,
1024 .ivsize
= AES_BLOCK_SIZE
,
1025 .setkey
= aes_set_key
,
1026 .encrypt
= ctr_crypt
,
1027 .decrypt
= ctr_crypt
,
1031 .cra_name
= "ctr(aes)",
1032 .cra_driver_name
= "ctr-aes-aesni",
1033 .cra_priority
= 400,
1034 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1036 .cra_ctxsize
= sizeof(struct async_aes_ctx
),
1038 .cra_type
= &crypto_ablkcipher_type
,
1039 .cra_module
= THIS_MODULE
,
1040 .cra_init
= ablk_ctr_init
,
1041 .cra_exit
= ablk_exit
,
1044 .min_keysize
= AES_MIN_KEY_SIZE
,
1045 .max_keysize
= AES_MAX_KEY_SIZE
,
1046 .ivsize
= AES_BLOCK_SIZE
,
1047 .setkey
= ablk_set_key
,
1048 .encrypt
= ablk_encrypt
,
1049 .decrypt
= ablk_encrypt
,
1054 .cra_name
= "__gcm-aes-aesni",
1055 .cra_driver_name
= "__driver-gcm-aes-aesni",
1057 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
,
1059 .cra_ctxsize
= sizeof(struct aesni_rfc4106_gcm_ctx
) +
1062 .cra_type
= &crypto_aead_type
,
1063 .cra_module
= THIS_MODULE
,
1066 .encrypt
= __driver_rfc4106_encrypt
,
1067 .decrypt
= __driver_rfc4106_decrypt
,
1071 .cra_name
= "rfc4106(gcm(aes))",
1072 .cra_driver_name
= "rfc4106-gcm-aesni",
1073 .cra_priority
= 400,
1074 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1076 .cra_ctxsize
= sizeof(struct aesni_rfc4106_gcm_ctx
) +
1079 .cra_type
= &crypto_nivaead_type
,
1080 .cra_module
= THIS_MODULE
,
1081 .cra_init
= rfc4106_init
,
1082 .cra_exit
= rfc4106_exit
,
1085 .setkey
= rfc4106_set_key
,
1086 .setauthsize
= rfc4106_set_authsize
,
1087 .encrypt
= rfc4106_encrypt
,
1088 .decrypt
= rfc4106_decrypt
,
1096 .cra_name
= "rfc3686(ctr(aes))",
1097 .cra_driver_name
= "rfc3686-ctr-aes-aesni",
1098 .cra_priority
= 400,
1099 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1101 .cra_ctxsize
= sizeof(struct async_aes_ctx
),
1103 .cra_type
= &crypto_ablkcipher_type
,
1104 .cra_module
= THIS_MODULE
,
1105 .cra_init
= ablk_rfc3686_ctr_init
,
1106 .cra_exit
= ablk_exit
,
1109 .min_keysize
= AES_MIN_KEY_SIZE
+
1110 CTR_RFC3686_NONCE_SIZE
,
1111 .max_keysize
= AES_MAX_KEY_SIZE
+
1112 CTR_RFC3686_NONCE_SIZE
,
1113 .ivsize
= CTR_RFC3686_IV_SIZE
,
1114 .setkey
= ablk_set_key
,
1115 .encrypt
= ablk_encrypt
,
1116 .decrypt
= ablk_decrypt
,
1124 .cra_name
= "lrw(aes)",
1125 .cra_driver_name
= "lrw-aes-aesni",
1126 .cra_priority
= 400,
1127 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1128 .cra_blocksize
= AES_BLOCK_SIZE
,
1129 .cra_ctxsize
= sizeof(struct async_aes_ctx
),
1131 .cra_type
= &crypto_ablkcipher_type
,
1132 .cra_module
= THIS_MODULE
,
1133 .cra_init
= ablk_lrw_init
,
1134 .cra_exit
= ablk_exit
,
1137 .min_keysize
= AES_MIN_KEY_SIZE
+ AES_BLOCK_SIZE
,
1138 .max_keysize
= AES_MAX_KEY_SIZE
+ AES_BLOCK_SIZE
,
1139 .ivsize
= AES_BLOCK_SIZE
,
1140 .setkey
= ablk_set_key
,
1141 .encrypt
= ablk_encrypt
,
1142 .decrypt
= ablk_decrypt
,
1148 .cra_name
= "pcbc(aes)",
1149 .cra_driver_name
= "pcbc-aes-aesni",
1150 .cra_priority
= 400,
1151 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1152 .cra_blocksize
= AES_BLOCK_SIZE
,
1153 .cra_ctxsize
= sizeof(struct async_aes_ctx
),
1155 .cra_type
= &crypto_ablkcipher_type
,
1156 .cra_module
= THIS_MODULE
,
1157 .cra_init
= ablk_pcbc_init
,
1158 .cra_exit
= ablk_exit
,
1161 .min_keysize
= AES_MIN_KEY_SIZE
,
1162 .max_keysize
= AES_MAX_KEY_SIZE
,
1163 .ivsize
= AES_BLOCK_SIZE
,
1164 .setkey
= ablk_set_key
,
1165 .encrypt
= ablk_encrypt
,
1166 .decrypt
= ablk_decrypt
,
1172 .cra_name
= "xts(aes)",
1173 .cra_driver_name
= "xts-aes-aesni",
1174 .cra_priority
= 400,
1175 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1176 .cra_blocksize
= AES_BLOCK_SIZE
,
1177 .cra_ctxsize
= sizeof(struct async_aes_ctx
),
1179 .cra_type
= &crypto_ablkcipher_type
,
1180 .cra_module
= THIS_MODULE
,
1181 .cra_init
= ablk_xts_init
,
1182 .cra_exit
= ablk_exit
,
1185 .min_keysize
= 2 * AES_MIN_KEY_SIZE
,
1186 .max_keysize
= 2 * AES_MAX_KEY_SIZE
,
1187 .ivsize
= AES_BLOCK_SIZE
,
1188 .setkey
= ablk_set_key
,
1189 .encrypt
= ablk_encrypt
,
1190 .decrypt
= ablk_decrypt
,
1197 static const struct x86_cpu_id aesni_cpu_id
[] = {
1198 X86_FEATURE_MATCH(X86_FEATURE_AES
),
1201 MODULE_DEVICE_TABLE(x86cpu
, aesni_cpu_id
);
1203 static int __init
aesni_init(void)
1207 if (!x86_match_cpu(aesni_cpu_id
))
1210 err
= crypto_fpu_init();
1214 for (i
= 0; i
< ARRAY_SIZE(aesni_algs
); i
++)
1215 INIT_LIST_HEAD(&aesni_algs
[i
].cra_list
);
1217 return crypto_register_algs(aesni_algs
, ARRAY_SIZE(aesni_algs
));
1220 static void __exit
aesni_exit(void)
1222 crypto_unregister_algs(aesni_algs
, ARRAY_SIZE(aesni_algs
));
1227 module_init(aesni_init
);
1228 module_exit(aesni_exit
);
1230 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1231 MODULE_LICENSE("GPL");
1232 MODULE_ALIAS("aes");