arch/x86/crypto/serpent_avx_glue.c
/*
 * Glue Code for AVX assembler versions of Serpent Cipher
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Glue code based on serpent_sse2_glue.c by:
 *  Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/serpent-avx.h>
#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
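
/*
 * CBC decryption done SERPENT_PARALLEL_BLOCKS at a time: each plaintext
 * block is D(C[i]) ^ C[i-1], so the previous ciphertext blocks are
 * stashed in ivs[] before the in-place multi-block decrypt and XORed
 * back in afterwards.  dst[0] is left untouched here; the glue helper
 * chains it with the IV.
 */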
static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
{
	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
	unsigned int j;

	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
		ivs[j] = src[j];

	serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
		u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
}
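
/*
 * CTR mode, one block: snapshot the counter in big-endian form, advance
 * it, encrypt the counter block and XOR the keystream into src to
 * produce dst.
 */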
static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
{
	be128 ctrblk;

	u128_to_be128(&ctrblk, iv);
	u128_inc(iv);

	__serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
	u128_xor(dst, src, (u128 *)&ctrblk);
}
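
/*
 * CTR mode, SERPENT_PARALLEL_BLOCKS at a time: fill ctrblks[] with
 * successive big-endian counter values, then let the x-way assembler
 * routine (serpent_enc_blk_xway_xor) encrypt the counters and XOR the
 * keystream into dst in one pass.
 */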
static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
				   u128 *iv)
{
	be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
	unsigned int i;

	for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
		if (dst != src)
			dst[i] = src[i];

		u128_to_be128(&ctrblks[i], iv);
		u128_inc(iv);
	}

	serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
}
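
/*
 * Dispatch tables for the glue helper.  Entries are ordered from the
 * widest block count down to a single block; the helper uses the widest
 * routine that still fits the remaining data.  fpu_blocks_limit is the
 * minimum batch size for which enabling the FPU/AVX state is considered
 * worthwhile.
 */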
static const struct common_glue_ctx serpent_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
	} }
};
static const struct common_glue_ctx serpent_ctr = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
	} }
};
static const struct common_glue_ctx serpent_dec = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
	} }
};
static const struct common_glue_ctx serpent_dec_cbc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
	}, {
		.num_blocks = 1,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
	} }
};
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
				       dst, src, nbytes);
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
				       nbytes);
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
}
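
/*
 * Enable the FPU/AVX state only when at least SERPENT_PARALLEL_BLOCKS
 * worth of data remain; smaller tails go through the scalar C
 * implementation, avoiding the cost of the FPU state save/restore.
 */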
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
			      NULL, fpu_enabled, nbytes);
}

static inline void serpent_fpu_end(bool fpu_enabled)
{
	glue_fpu_end(fpu_enabled);
}

struct crypt_priv {
	struct serpent_ctx *ctx;
	bool fpu_enabled;
};
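
/*
 * In-place callbacks handed to the generic LRW/XTS helpers: a full
 * SERPENT_PARALLEL_BLOCKS chunk takes the x-way AVX path, anything
 * shorter is processed one scalar block at a time.  fpu_enabled is
 * carried in crypt_priv so one FPU section can span several callback
 * invocations.
 */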
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_encrypt(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_decrypt(ctx->ctx, srcdst, srcdst);
}
struct serpent_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct serpent_ctx serpent_ctx;
};
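
/*
 * An LRW key is the Serpent key with one extra SERPENT_BLOCK_SIZE
 * (16-byte) tweak key appended; the tail initializes the GF(2^128)
 * multiplication table.
 */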
static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
							SERPENT_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen -
						SERPENT_BLOCK_SIZE);
}
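
/*
 * CRYPTO_TFM_REQ_MAY_SLEEP is cleared because the walk below may run
 * inside the FPU section opened by the callbacks, where sleeping is not
 * allowed; serpent_fpu_end() closes any section still open when the
 * helper returns.
 */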
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
struct serpent_xts_ctx {
	struct serpent_ctx tweak_ctx;
	struct serpent_ctx crypt_ctx;
};
static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}
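
/*
 * The first five entries are the internal, synchronous "__" variants
 * (priority 0, never selected directly); the last five are the
 * user-visible asynchronous wrappers, which route through cryptd via
 * the ablk helper so the AVX paths can be used from any context.  Note
 * that ctr(serpent) sets .decrypt = ablk_encrypt: in CTR mode
 * decryption is the same operation as encryption.
 */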
static struct crypto_alg serpent_algs[10] = { {
	.cra_name = "__ecb-serpent-avx",
	.cra_driver_name = "__driver-ecb-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.setkey = serpent_setkey,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
}, {
	.cra_name = "__cbc-serpent-avx",
	.cra_driver_name = "__driver-cbc-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.setkey = serpent_setkey,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
}, {
	.cra_name = "__ctr-serpent-avx",
	.cra_driver_name = "__driver-ctr-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct serpent_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = serpent_setkey,
			.encrypt = ctr_crypt,
			.decrypt = ctr_crypt,
		},
	},
}, {
	.cra_name = "__lrw-serpent-avx",
	.cra_driver_name = "__driver-lrw-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_lrw_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_exit = lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = lrw_serpent_setkey,
			.encrypt = lrw_encrypt,
			.decrypt = lrw_decrypt,
		},
	},
}, {
	.cra_name = "__xts-serpent-avx",
	.cra_driver_name = "__driver-xts-serpent-avx",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_xts_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = xts_serpent_setkey,
			.encrypt = xts_encrypt,
			.decrypt = xts_decrypt,
		},
	},
}, {
	.cra_name = "ecb(serpent)",
	.cra_driver_name = "ecb-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "cbc(serpent)",
	.cra_driver_name = "cbc-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = __ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "ctr(serpent)",
	.cra_driver_name = "ctr-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_encrypt,
			.geniv = "chainiv",
		},
	},
}, {
	.cra_name = "lrw(serpent)",
	.cra_driver_name = "lrw-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.max_keysize = SERPENT_MAX_KEY_SIZE +
				       SERPENT_BLOCK_SIZE,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "xts(serpent)",
	.cra_driver_name = "xts-serpent-avx",
	.cra_priority = 500,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_helper_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
			.ivsize = SERPENT_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
} };
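
/*
 * CPUID reporting AVX is not enough: the OSXSAVE bit and XCR0 must also
 * show that the OS saves/restores both the SSE and YMM register state,
 * otherwise the AVX code paths cannot be used safely.
 */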
static int __init serpent_init(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave) {
		printk(KERN_INFO "AVX instructions are not detected.\n");
		return -ENODEV;
	}

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		printk(KERN_INFO "AVX detected but unusable.\n");
		return -ENODEV;
	}

	return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}
static void __exit serpent_exit(void)
{
	crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}
module_init(serpent_init);
module_exit(serpent_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("serpent");