crypto: move arch/x86/include/asm/serpent-{sse2|avx}.h to arch/x86/include/asm/crypto/
arch/x86/crypto/serpent_avx_glue.c
/*
 * Glue Code for AVX assembler versions of Serpent Cipher
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Glue code based on serpent_sse2_glue.c by:
 *  Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/serpent-avx.h>
#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
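
/*
 * CBC decryption of SERPENT_PARALLEL_BLOCKS blocks at once: the previous
 * ciphertext blocks are the chaining values, so they are saved before the
 * in-place parallel decrypt and XORed back in afterwards.  Block 0 is
 * chained to the IV by the generic glue code, which is why only blocks
 * 1..SERPENT_PARALLEL_BLOCKS-1 are handled here.
 */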
static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
{
	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
	unsigned int j;

	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
		ivs[j] = src[j];

	serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
		u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
}
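
/*
 * CTR helpers: the counter is kept in native u128 form and converted to
 * big-endian only for encryption.  The single-block variant encrypts one
 * counter block and XORs it into the data; the xway variant fills
 * SERPENT_PARALLEL_BLOCKS counter blocks and lets
 * serpent_enc_blk_xway_xor() do the encrypt-and-XOR in one assembler pass.
 */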
static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
{
	be128 ctrblk;

	u128_to_be128(&ctrblk, iv);
	u128_inc(iv);

	__serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
	u128_xor(dst, src, (u128 *)&ctrblk);
}
static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
				   u128 *iv)
{
	be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
	unsigned int i;

	for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
		if (dst != src)
			dst[i] = src[i];

		u128_to_be128(&ctrblks[i], iv);
		u128_inc(iv);
	}

	serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
}
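
/*
 * Dispatch tables for the generic glue_helper code.  Each table is walked
 * in order: while at least num_blocks blocks remain, the matching routine
 * is used, so full SERPENT_PARALLEL_BLOCKS batches go to the AVX xway
 * functions and any tail is finished one block at a time with the generic
 * C implementation.  fpu_blocks_limit tells the glue code how many blocks
 * must be pending before claiming the FPU is worthwhile.
 */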
static const struct common_glue_ctx serpent_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
	} }
};
static const struct common_glue_ctx serpent_ctr = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
	} }
};
static const struct common_glue_ctx serpent_dec = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
	} }
};
static const struct common_glue_ctx serpent_dec_cbc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
	} }
};
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
				       dst, src, nbytes);
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
				       nbytes);
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
}
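
/*
 * Entering and leaving kernel FPU context is expensive, so the FPU (and
 * with it the AVX unit) is only claimed once at least a full parallel
 * batch of blocks is pending, and is released again when the walk ends.
 */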
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
			      NULL, fpu_enabled, nbytes);
}

static inline void serpent_fpu_end(bool fpu_enabled)
{
	glue_fpu_end(fpu_enabled);
}
struct crypt_priv {
	struct serpent_ctx *ctx;
	bool fpu_enabled;
};
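
/*
 * encrypt_callback()/decrypt_callback() are handed to lrw_crypt() and
 * xts_crypt() below.  They are called on contiguous chunks of already
 * tweaked data: full SERPENT_PARALLEL_BLOCKS chunks take the AVX fast
 * path, shorter tails fall back to the one-block implementation, and the
 * FPU state is tracked across calls in struct crypt_priv.
 */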
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_encrypt(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_decrypt(ctx->ctx, srcdst, srcdst);
}
struct serpent_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct serpent_ctx serpent_ctx;
};
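
/*
 * For LRW the last SERPENT_BLOCK_SIZE bytes of the supplied key are the
 * tweak key used to precompute the GF(2^128) multiplication table; the
 * remaining leading bytes are the Serpent cipher key proper.
 */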
static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
							SERPENT_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen -
						SERPENT_BLOCK_SIZE);
}
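
/*
 * buf provides on-stack scratch space for one parallel batch of tweaked
 * blocks.  CRYPTO_TFM_REQ_MAY_SLEEP is cleared because the callbacks may
 * run with the FPU claimed (preemption disabled), where sleeping is not
 * allowed.
 */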
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
struct serpent_xts_ctx {
	struct serpent_ctx tweak_ctx;
	struct serpent_ctx crypt_ctx;
};
static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}
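
/*
 * xts_crypt() computes the tweaks itself via tweak_fn/tweak_ctx (the
 * second half of the XTS key schedule) and calls crypt_fn on the data;
 * as in the LRW case, buf buffers one parallel batch and MAY_SLEEP is
 * cleared for the FPU sections.
 */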
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
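
/*
 * Ten algorithm registrations: the first five ("__driver-..." names,
 * priority 0) are internal synchronous blkciphers that may only run where
 * the FPU is usable.  The last five are the public, high-priority async
 * ablkcipher wrappers that route requests to the internal versions through
 * the ablk_helper/cryptd machinery, deferring to a kernel thread whenever
 * the caller's context cannot use the FPU.
 */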
static struct crypto_alg serpent_algs[10] = { {
	.cra_name = "__ecb-serpent-avx",
	.cra_driver_name = "__driver-ecb-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[0].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.setkey = serpent_setkey,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
}, {
	.cra_name = "__cbc-serpent-avx",
	.cra_driver_name = "__driver-cbc-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[1].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.setkey = serpent_setkey,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
}, {
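	/*
	 * CTR mode only ever encrypts counter blocks, so encryption and
	 * decryption are the same keystream XOR; both this internal entry
	 * and the ctr(serpent) wrapper below register the same function
	 * for both directions.
	 */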
	.cra_name = "__ctr-serpent-avx",
	.cra_driver_name = "__driver-ctr-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[2].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = serpent_setkey,
			.encrypt = ctr_crypt,
			.decrypt = ctr_crypt,
		},
	},
}, {
	.cra_name = "__lrw-serpent-avx",
	.cra_driver_name = "__driver-lrw-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_lrw_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[3].cra_list),
	.cra_exit = lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = lrw_serpent_setkey,
			.encrypt = lrw_encrypt,
			.decrypt = lrw_decrypt,
		},
	},
}, {
	.cra_name = "__xts-serpent-avx",
	.cra_driver_name = "__driver-xts-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_xts_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[4].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = xts_serpent_setkey,
			.encrypt = xts_encrypt,
			.decrypt = xts_decrypt,
		},
	},
}, {
	.cra_name = "ecb(serpent)",
	.cra_driver_name = "ecb-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[5].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "cbc(serpent)",
	.cra_driver_name = "cbc-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[6].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = __ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "ctr(serpent)",
	.cra_driver_name = "ctr-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[7].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_encrypt,
			.geniv = "chainiv",
		},
	},
}, {
	.cra_name = "lrw(serpent)",
	.cra_driver_name = "lrw-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[8].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "xts(serpent)",
	.cra_driver_name = "xts-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_algs[9].cra_list),
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
} };
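
/*
 * The module loads only if the CPU advertises AVX and OSXSAVE and the OS
 * has enabled both SSE and YMM state saving in XCR0 (checked via
 * xgetbv()); otherwise the AVX instructions in the assembler code would
 * fault.
 */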
static int __init serpent_init(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave) {
		printk(KERN_INFO "AVX instructions are not detected.\n");
		return -ENODEV;
	}

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		printk(KERN_INFO "AVX detected but unusable.\n");
		return -ENODEV;
	}

	return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

static void __exit serpent_exit(void)
{
	crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

module_init(serpent_init);
module_exit(serpent_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("serpent");