/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif
struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};

/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];
	struct crypto_aes_ctx aes_key_expanded;
	u8 nonce[4];
	struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};
#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);
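
/* The CTR and RFC4106 GCM assembler routines declared below are only built
 * for 64-bit kernels, hence the CONFIG_X86_64 guard around this section.
 */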
#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)
		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
}
#endif
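
/* Round the raw tfm context pointer up to AESNI_ALIGN. The crypto API only
 * guarantees crypto_tfm_ctx_alignment(), which may be smaller than the
 * 16-byte alignment the AES-NI routines expect.
 */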
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
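
/* The setkey/encrypt/decrypt wrappers below share one pattern: if the
 * FPU/SIMD state cannot be used (irq_fpu_usable() is false, e.g. in
 * interrupt context), fall back to the generic x86 AES code; otherwise run
 * the AES-NI routine between kernel_fpu_begin() and kernel_fpu_end().
 */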
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}
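
/* ECB/CBC helpers: walk the scatterlists and feed the AES-NI routines as
 * many whole AES blocks as possible per step, handing any partial tail back
 * to the blkcipher walk.
 */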
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
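/* Handle the final partial block of a CTR request: encrypt the current
 * counter block to get one block of keystream, XOR it with the remaining
 * bytes, and advance the counter.
 */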
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
#endif
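
/* Asynchronous ablkcipher wrappers. Each wrapper owns a cryptd child
 * transform: when the FPU is usable the request is handled synchronously by
 * the underlying "__driver" blkcipher, otherwise it is queued to cryptd so
 * the AES-NI code runs later in process context.
 */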
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->encrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

static int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);

	return 0;
}
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}

#ifdef HAS_CTR
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "rfc3686(__driver-ctr-aes-aesni)");
}
#endif
#endif

#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(lrw(__driver-aes-aesni))");
}
#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(xts(__driver-aes-aesni))");
}
#endif
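
/* RFC4106 AES-GCM glue (64-bit only). The user-visible rfc4106(gcm(aes))
 * AEAD wraps a cryptd child transform so that requests arriving while the
 * FPU is unusable can be deferred to process context.
 */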
#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
	return;
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}
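
/* Derive the GHASH subkey H = AES_K(0^128). Encrypting a 16-byte all-zero
 * buffer through ctr(aes) with an all-zero counter block is equivalent to a
 * single raw AES encryption of the zero block, which is exactly the hash
 * subkey GCM needs.
 */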
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
		aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/*key is not aligned: use an auxiliary aligned pointer*/
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_mem, key, key_len);
		key = new_key_mem;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length and
 * can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}
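
/* Synchronous GCM workers. Each builds the 16-byte pre-counter block J0 as
 * salt (from the key) || explicit IV (from the request) || 0x00000001,
 * linearises the scatterlists into one buffer when either the source or the
 * AAD list has more than one entry, and then calls the assembler routine,
 * which also produces or verifies the authentication tag.
 */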
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, the AAD length needs to be 8 or 12 bytes.
	 */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					 req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
					 req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, the AAD length needs to be 8 or 12 bytes.
	 */
	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		/* assoc follows the ciphertext in the linear buffer */
		assoc = (src + req->cryptlen);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					 req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
		kfree(src);
	}
	return retval;
}
#endif
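
/* Algorithm table. The "__"-prefixed entries are the internal synchronous
 * helpers registered at priority 0 (used via cryptd and the fpu template);
 * the remaining entries are the user-visible algorithms registered at
 * priority 300 (plain "aes") or 400 (the asynchronous wrappers).
 */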
static struct crypto_alg aesni_algs[] = { {
	.cra_name = "aes",
	.cra_driver_name = "aes-aesni",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt
		}
	}
}, {
	.cra_name = "__aes-aesni",
	.cra_driver_name = "__driver-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = __aes_encrypt,
			.cia_decrypt = __aes_decrypt
		}
	}
}, {
	.cra_name = "__ecb-aes-aesni",
	.cra_driver_name = "__driver-ecb-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
}, {
	.cra_name = "__cbc-aes-aesni",
	.cra_driver_name = "__driver-cbc-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
}, {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_ecb_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_cbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#ifdef CONFIG_X86_64
}, {
	.cra_name = "__ctr-aes-aesni",
	.cra_driver_name = "__driver-ctr-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = aes_set_key,
			.encrypt = ctr_crypt,
			.decrypt = ctr_crypt,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "ctr-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_ctr_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_encrypt,
			.geniv = "chainiv",
		},
	},
}, {
	.cra_name = "__gcm-aes-aesni",
	.cra_driver_name = "__driver-gcm-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
		       AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.aead = {
			.encrypt = __driver_rfc4106_encrypt,
			.decrypt = __driver_rfc4106_decrypt,
		},
	},
}, {
	.cra_name = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
		       AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_nivaead_type,
	.cra_module = THIS_MODULE,
	.cra_init = rfc4106_init,
	.cra_exit = rfc4106_exit,
	.cra_u = {
		.aead = {
			.setkey = rfc4106_set_key,
			.setauthsize = rfc4106_set_authsize,
			.encrypt = rfc4106_encrypt,
			.decrypt = rfc4106_decrypt,
			.geniv = "seqiv",
			.ivsize = 8,
			.maxauthsize = 16,
		},
	},
#ifdef HAS_CTR
}, {
	.cra_name = "rfc3686(ctr(aes))",
	.cra_driver_name = "rfc3686-ctr-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_rfc3686_ctr_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
			.geniv = "seqiv",
		},
	},
#endif
#endif
#ifdef HAS_LRW
}, {
	.cra_name = "lrw(aes)",
	.cra_driver_name = "lrw-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_lrw_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#endif
#ifdef HAS_PCBC
}, {
	.cra_name = "pcbc(aes)",
	.cra_driver_name = "pcbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_pcbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#endif
#ifdef HAS_XTS
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "xts-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_xts_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#endif
} };

static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
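
/* Module init: bail out early on CPUs without the AES CPUID feature flag,
 * register the "fpu" template used by the lrw/pcbc/xts wrappers, then
 * register every algorithm in the table above.
 */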
static int __init aesni_init(void)
{
	int err, i;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;

	err = crypto_fpu_init();
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(aesni_algs); i++)
		INIT_LIST_HEAD(&aesni_algs[i].cra_list);

	return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}

static void __exit aesni_exit(void)
{
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");