/*
 * Glue Code for SSE2 assembler versions of Serpent Cipher
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * Glue code based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */

#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/i387.h>
#include <asm/serpent.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

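/*
 * Context for the asynchronous (cryptd-backed) algorithm instances; it
 * only carries the cryptd transform that performs the actual work.
 */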
struct async_serpent_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};

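/*
 * FPU/SSE state is claimed lazily: the scalar Serpent routines need no
 * XMM registers, so the FPU is only taken once a full parallel batch is
 * available, and is then held until the whole walk is finished.
 */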
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	if (fpu_enabled)
		return true;

	/* SSE2 is only used when chunk to be processed is large enough, so
	 * do not enable FPU until it is necessary.
	 */
	if (nbytes < SERPENT_BLOCK_SIZE * SERPENT_PARALLEL_BLOCKS)
		return false;

	kernel_fpu_begin();
	return true;
}

static inline void serpent_fpu_end(bool fpu_enabled)
{
	if (fpu_enabled)
		kernel_fpu_end();
}

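/*
 * Shared ECB worker: while enough data remains it processes
 * SERPENT_PARALLEL_BLOCKS blocks per call of the x-way assembler routine,
 * then falls back to one block at a time for the tail.
 */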
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     bool enc)
{
	bool fpu_enabled = false;
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);

		/* Process multi-block batch */
		if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
			do {
				if (enc)
					serpent_enc_blk_xway(ctx, wdst, wsrc);
				else
					serpent_dec_blk_xway(ctx, wdst, wsrc);

				wsrc += bsize * SERPENT_PARALLEL_BLOCKS;
				wdst += bsize * SERPENT_PARALLEL_BLOCKS;
				nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
			} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers */
		do {
			if (enc)
				__serpent_encrypt(ctx, wdst, wsrc);
			else
				__serpent_decrypt(ctx, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	serpent_fpu_end(fpu_enabled);
	return err;
}

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, true);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, false);
}

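/*
 * CBC encryption is inherently serial (each block is chained to the
 * previous ciphertext block), so only the one-block-at-a-time scalar
 * path exists here.
 */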
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		__serpent_encrypt(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	*(u128 *)walk->iv = *iv;
	return nbytes;
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __cbc_encrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

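/*
 * CBC decryption parallelizes: the chunk is processed back to front so a
 * whole batch can be decrypted x-way and then XORed with the preceding
 * ciphertext blocks. ivs[] snapshots ciphertext that an in-place
 * operation would otherwise overwrite before it is needed.
 */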
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
	u128 last_iv;
	int i;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	/* Process multi-block batch */
	if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
		do {
			nbytes -= bsize * (SERPENT_PARALLEL_BLOCKS - 1);
			src -= SERPENT_PARALLEL_BLOCKS - 1;
			dst -= SERPENT_PARALLEL_BLOCKS - 1;

			for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
				ivs[i] = src[i];

			serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

			for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
				u128_xor(dst + (i + 1), dst + (i + 1), ivs + i);

			nbytes -= bsize;
			if (nbytes < bsize)
				goto done;

			u128_xor(dst, dst, src - 1);
			src -= 1;
			dst -= 1;
		} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	for (;;) {
		__serpent_decrypt(ctx, (u8 *)dst, (u8 *)src);

		nbytes -= bsize;
		if (nbytes < bsize)
			break;

		u128_xor(dst, dst, src - 1);
		src -= 1;
		dst -= 1;
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
		nbytes = __cbc_decrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	serpent_fpu_end(fpu_enabled);
	return err;
}

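/*
 * CTR keeps the counter as a CPU-endian u128 so incrementing is cheap;
 * it is converted back to big-endian representation only when a counter
 * block is actually fed to the cipher.
 */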
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
	dst->a = cpu_to_be64(src->a);
	dst->b = cpu_to_be64(src->b);
}

static inline void be128_to_u128(u128 *dst, const be128 *src)
{
	dst->a = be64_to_cpu(src->a);
	dst->b = be64_to_cpu(src->b);
}

static inline void u128_inc(u128 *i)
{
	i->b++;
	if (!i->b)
		i->a++;
}

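/*
 * Handle a final partial block CTR-style: encrypt the counter and XOR
 * only nbytes of keystream into the output, so CTR behaves as a stream
 * cipher for arbitrary lengths.
 */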
static void ctr_crypt_final(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *ctrblk = walk->iv;
	u8 keystream[SERPENT_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	__serpent_encrypt(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, SERPENT_BLOCK_SIZE);
}

static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ctrblk;
	be128 ctrblocks[SERPENT_PARALLEL_BLOCKS];
	int i;

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
		do {
			/* create ctrblks for parallel encrypt */
			for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
				if (dst != src)
					dst[i] = src[i];

				u128_to_be128(&ctrblocks[i], &ctrblk);
				u128_inc(&ctrblk);
			}

			serpent_enc_blk_xway_xor(ctx, (u8 *)dst,
						 (u8 *)ctrblocks);

			src += SERPENT_PARALLEL_BLOCKS;
			dst += SERPENT_PARALLEL_BLOCKS;
			nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
		} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	do {
		if (dst != src)
			*dst = *src;

		u128_to_be128(&ctrblocks[0], &ctrblk);
		u128_inc(&ctrblk);

		__serpent_encrypt(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
		u128_xor(dst, dst, (u128 *)ctrblocks);

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

done:
	u128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}

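/*
 * The walk is asked for SERPENT_BLOCK_SIZE granularity, so any partial
 * final block is left in walk.nbytes and finished by ctr_crypt_final()
 * after the FPU has been released.
 */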
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, SERPENT_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes) >= SERPENT_BLOCK_SIZE) {
		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
		nbytes = __ctr_crypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	serpent_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		ctr_crypt_final(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}

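/*
 * The generic LRW/XTS helpers call back into these routines with
 * already-tweaked data; crypt_priv threads the lazy FPU state through
 * the possibly repeated callback invocations.
 */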
struct crypt_priv {
	struct serpent_ctx *ctx;
	bool fpu_enabled;
};

static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_encrypt(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_decrypt(ctx->ctx, srcdst, srcdst);
}

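/*
 * For LRW the supplied key is the cipher key followed by one extra
 * cipher block that keys the tweak multiplication table.
 */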
struct serpent_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct serpent_ctx serpent_ctx;
};

static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
							SERPENT_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen -
						SERPENT_BLOCK_SIZE);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->serpent_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

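/*
 * XTS uses two independent cipher keys, one for the data and one for
 * encrypting the tweak, hence the two serpent contexts.
 */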
struct serpent_xts_ctx {
	struct serpent_ctx tweak_ctx;
	struct serpent_ctx crypt_ctx;
};

static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[SERPENT_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	serpent_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

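/*
 * ablkcipher wrappers around the internal "__driver-*" blkciphers
 * registered below: requests run synchronously when the FPU is usable
 * in the current context and are otherwise deferred to cryptd, which
 * re-issues them from process context.
 */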
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}

static int __ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct blkcipher_desc desc;

	desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
	desc.info = req->info;
	desc.flags = 0;

	return crypto_blkcipher_crt(desc.tfm)->encrypt(
		&desc, req->dst, req->src, req->nbytes);
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);

		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		return __ablk_encrypt(req);
	}
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);

		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;

		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;

		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_serpent_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

static int ablk_init(struct crypto_tfm *tfm)
{
	struct async_serpent_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_ablkcipher *cryptd_tfm;
	char drv_name[CRYPTO_MAX_ALG_NAME];

	snprintf(drv_name, sizeof(drv_name), "__driver-%s",
					crypto_tfm_alg_driver_name(tfm));

	cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);

	return 0;
}

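/*
 * Five internal synchronous algorithms (priority 0, "__" prefixed, not
 * meant to be used directly) plus their five public async counterparts
 * at priority 400.
 */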
static struct crypto_alg serpent_algs[10] = { {
	.cra_name		= "__ecb-serpent-sse2",
	.cra_driver_name	= "__driver-ecb-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[0].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-serpent-sse2",
	.cra_driver_name	= "__driver-cbc-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[1].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "__ctr-serpent-sse2",
	.cra_driver_name	= "__driver-ctr-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[2].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= serpent_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "__lrw-serpent-sse2",
	.cra_driver_name	= "__driver-lrw-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[3].cra_list),
	.cra_exit		= lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= lrw_serpent_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-serpent-sse2",
	.cra_driver_name	= "__driver-xts-serpent-sse2",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct serpent_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[4].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= xts_serpent_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	.cra_name		= "ecb(serpent)",
	.cra_driver_name	= "ecb-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[5].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(serpent)",
	.cra_driver_name	= "cbc-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[6].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= __ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "ctr(serpent)",
	.cra_driver_name	= "ctr-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[7].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
}, {
	.cra_name		= "lrw(serpent)",
	.cra_driver_name	= "lrw-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[8].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.max_keysize	= SERPENT_MAX_KEY_SIZE +
					  SERPENT_BLOCK_SIZE,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(serpent)",
	.cra_driver_name	= "xts-serpent-sse2",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= SERPENT_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_serpent_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(serpent_algs[9].cra_list),
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
			.ivsize		= SERPENT_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };

static int __init serpent_sse2_init(void)
{
	if (!cpu_has_xmm2) {
		printk(KERN_INFO "SSE2 instructions are not detected.\n");
		return -ENODEV;
	}

	return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

static void __exit serpent_sse2_exit(void)
{
	crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
}

module_init(serpent_sse2_init);
module_exit(serpent_sse2_exit);

MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("serpent");