/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>

#define LRW_BUFFER_SIZE 128u
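
/*
 * LRW encrypts block number I of the plaintext P as
 *
 *	C = E(Key1, P xor T) xor T,	where T = I (x) Key2
 *
 * with (x) denoting multiplication in GF(2^128) and Key2 the 16-byte
 * tweak key appended to the cipher key (see setkey() below).
 */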
struct priv {
        struct crypto_skcipher *child;
        struct lrw_table_ctx table;
};

struct rctx {
        be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];

        be128 t;

        be128 *ext;

        struct scatterlist srcbuf[2];
        struct scatterlist dstbuf[2];
        struct scatterlist *src;
        struct scatterlist *dst;

        unsigned int left;

        struct skcipher_request subreq;
};
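
/*
 * Requests are processed in chunks: pre_crypt() computes PP = P xor T for
 * one chunk and saves the T values in buf (or the larger ext buffer), the
 * child cipher then transforms the chunk in place, and post_crypt()
 * applies the second xor with the saved T values.  left tracks how many
 * bytes of the original request still need processing.
 */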
static inline void setbit128_bbe(void *b, int bit)
{
        __set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
                         BITS_PER_LONG
#else
                         BITS_PER_BYTE
#endif
                        ), b);
}
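
/*
 * setbit128_bbe(b, i) sets bit i of the 16-byte block at b, counted in
 * the big-endian bytewise (bbe) convention used by gf128mul: bit 0 is
 * the least significant bit of the last byte.  The xor above translates
 * that index into the native bit numbering expected by __set_bit().
 */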
int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
{
        be128 tmp = { 0 };
        int i;

        if (ctx->table)
                gf128mul_free_64k(ctx->table);

        /* initialize multiplication table for Key2 */
        ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
        if (!ctx->table)
                return -ENOMEM;

        /* initialize optimization table */
        for (i = 0; i < 128; i++) {
                setbit128_bbe(&tmp, i);
                ctx->mulinc[i] = tmp;
                gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(lrw_init_table);
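
/*
 * After iteration i of the loop above, tmp has bits 0..i set, so
 * mulinc[i] holds (2^(i+1) - 1) (x) Key2.  These values let the tweak T
 * be updated incrementally each time the block index grows by one; see
 * the comment above get_index128() below.
 */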
void lrw_free_table(struct lrw_table_ctx *ctx)
{
        if (ctx->table)
                gf128mul_free_64k(ctx->table);
}
EXPORT_SYMBOL_GPL(lrw_free_table);

static int setkey(struct crypto_skcipher *parent, const u8 *key,
                  unsigned int keylen)
{
        struct priv *ctx = crypto_skcipher_ctx(parent);
        struct crypto_skcipher *child = ctx->child;
        int err, bsize = LRW_BLOCK_SIZE;
        const u8 *tweak = key + keylen - bsize;

        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
                                         CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(child, key, keylen - bsize);
        crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
                                          CRYPTO_TFM_RES_MASK);
        if (err)
                return err;

        return lrw_init_table(&ctx->table, tweak);
}
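
/*
 * The lrw template key is the child cipher key with the 16-byte tweak
 * key (Key2) appended, so e.g. lrw(aes) with AES-128 takes a 32-byte
 * key: the first 16 bytes go to the AES child, the last 16 seed the
 * GF(2^128) multiplication tables.
 */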
static inline void inc(be128 *iv)
{
        be64_add_cpu(&iv->b, 1);
        if (!iv->b)
                be64_add_cpu(&iv->a, 1);
}
/* this returns the number of consecutive 1 bits starting
 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
        int x;
        __be32 *p = (__be32 *) block;

        for (p += 3, x = 0; x < 128; p--, x += 32) {
                u32 val = be32_to_cpup(p);

                if (!~val)
                        continue;

                return x + ffz(val);
        }

        return x;
}
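
/*
 * If the block index I has k trailing one bits, then I xor (I + 1) has
 * exactly bits 0..k set, i.e. it equals 2^(k+1) - 1.  Since GF(2^128)
 * multiplication distributes over xor, the next tweak follows from the
 * previous one with a single table lookup and xor:
 *
 *	T(I + 1) = T(I) xor ((I xor (I + 1)) (x) Key2)
 *	         = T(I) xor mulinc[get_index128(I)]
 *
 * which is the optimization discussed in the LRW specification.
 */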
static int post_crypt(struct skcipher_request *req)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        be128 *buf = rctx->ext ?: rctx->buf;
        struct skcipher_request *subreq;
        const int bs = LRW_BLOCK_SIZE;
        struct skcipher_walk w;
        struct scatterlist *sg;
        unsigned offset;
        int err;

        subreq = &rctx->subreq;
        err = skcipher_walk_virt(&w, subreq, false);

        while (w.nbytes) {
                unsigned int avail = w.nbytes;
                be128 *wdst;

                wdst = w.dst.virt.addr;

                do {
                        be128_xor(wdst, buf++, wdst);
                        wdst++;
                } while ((avail -= bs) >= bs);

                err = skcipher_walk_done(&w, avail);
        }

        rctx->left -= subreq->cryptlen;

        if (err || !rctx->left)
                goto out;

        rctx->dst = rctx->dstbuf;

        scatterwalk_done(&w.out, 0, 1);
        sg = w.out.sg;
        offset = w.out.offset;

        if (rctx->dst != sg) {
                rctx->dst[0] = *sg;
                sg_unmark_end(rctx->dst);
                scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
        }
        rctx->dst[0].length -= offset - sg->offset;
        rctx->dst[0].offset = offset;

out:
        return err;
}
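
/*
 * post_crypt() consumes the T values that pre_crypt() stashed in buf,
 * turning CC = E(Key1, PP) into the final C = CC xor T.  When more data
 * remains, the destination scatterlist is rebuilt to start at the first
 * unprocessed byte before the next chunk is handled.
 */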
static int pre_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct rctx *rctx = skcipher_request_ctx(req);
        struct priv *ctx = crypto_skcipher_ctx(tfm);
        be128 *buf = rctx->ext ?: rctx->buf;
        struct skcipher_request *subreq;
        const int bs = LRW_BLOCK_SIZE;
        struct skcipher_walk w;
        struct scatterlist *sg;
        unsigned cryptlen;
        unsigned offset;
        be128 *iv;
        bool more;
        int err;

        subreq = &rctx->subreq;
        skcipher_request_set_tfm(subreq, tfm);

        cryptlen = subreq->cryptlen;
        more = rctx->left > cryptlen;
        if (!more)
                cryptlen = rctx->left;

        skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
                                   cryptlen, req->iv);

        err = skcipher_walk_virt(&w, subreq, false);
        iv = w.iv;

        while (w.nbytes) {
                unsigned int avail = w.nbytes;
                be128 *wsrc;
                be128 *wdst;

                wsrc = w.src.virt.addr;
                wdst = w.dst.virt.addr;

                do {
                        *buf++ = rctx->t;
                        be128_xor(wdst++, &rctx->t, wsrc++);

                        /* T <- I*Key2, using the optimization
                         * discussed in the specification */
                        be128_xor(&rctx->t, &rctx->t,
                                  &ctx->table.mulinc[get_index128(iv)]);
                        inc(iv);
                } while ((avail -= bs) >= bs);

                err = skcipher_walk_done(&w, avail);
        }

        skcipher_request_set_tfm(subreq, ctx->child);
        skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
                                   cryptlen, NULL);

        if (err || !more)
                goto out;

        rctx->src = rctx->srcbuf;

        scatterwalk_done(&w.in, 0, 1);
        sg = w.in.sg;
        offset = w.in.offset;

        if (rctx->src != sg) {
                rctx->src[0] = *sg;
                sg_unmark_end(rctx->src);
                scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
        }
        rctx->src[0].length -= offset - sg->offset;
        rctx->src[0].offset = offset;

out:
        return err;
}
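
/*
 * On return from pre_crypt() the subrequest points at the child cipher
 * with dst as both source and destination, so the caller's subsequent
 * crypto_skcipher_encrypt()/decrypt() call transforms PP in place.
 */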
static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
        struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq;
        gfp_t gfp;

        subreq = &rctx->subreq;
        skcipher_request_set_callback(subreq, req->base.flags, done, req);

        gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                                                           GFP_ATOMIC;
        rctx->ext = NULL;

        subreq->cryptlen = LRW_BUFFER_SIZE;
        if (req->cryptlen > LRW_BUFFER_SIZE) {
                unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

                rctx->ext = kmalloc(n, gfp);
                if (rctx->ext)
                        subreq->cryptlen = n;
        }

        rctx->src = req->src;
        rctx->dst = req->dst;
        rctx->left = req->cryptlen;

        /* calculate first value of T */
        memcpy(&rctx->t, req->iv, sizeof(rctx->t));

        /* T <- I*Key2 */
        gf128mul_64k_bbe(&rctx->t, ctx->table.table);

        return 0;
}
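
/*
 * The chunk size determines how many T values must be buffered.  For
 * requests larger than the 128-byte inline buffer, a bigger buffer (up
 * to one page) is allocated opportunistically; if the allocation fails,
 * the code falls back to the inline buffer and simply processes more,
 * smaller chunks.
 */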
static void exit_crypt(struct skcipher_request *req)
{
        struct rctx *rctx = skcipher_request_ctx(req);

        rctx->left = 0;

        if (rctx->ext)
                kfree(rctx->ext);
}

static int do_encrypt(struct skcipher_request *req, int err)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq;

        subreq = &rctx->subreq;

        while (!err && rctx->left) {
                err = pre_crypt(req) ?:
                      crypto_skcipher_encrypt(subreq) ?:
                      post_crypt(req);

                if (err == -EINPROGRESS || err == -EBUSY)
                        return err;
        }

        exit_crypt(req);
        return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
        struct skcipher_request *req = areq->data;
        struct skcipher_request *subreq;
        struct rctx *rctx;

        rctx = skcipher_request_ctx(req);

        if (err == -EINPROGRESS) {
                if (rctx->left != req->cryptlen)
                        return;
                goto out;
        }

        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

        err = do_encrypt(req, err ?: post_crypt(req));
        if (rctx->left)
                return;

out:
        skcipher_request_complete(req, err);
}
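
/*
 * If the child cipher completes asynchronously, do_encrypt() returns
 * -EINPROGRESS/-EBUSY and encrypt_done() picks the request back up from
 * the completion callback: it finishes the interrupted chunk with
 * post_crypt() and loops over any remaining chunks via do_encrypt().
 * decrypt_done() below mirrors this for decryption.
 */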
static int encrypt(struct skcipher_request *req)
{
        return do_encrypt(req, init_crypt(req, encrypt_done));
}

static int do_decrypt(struct skcipher_request *req, int err)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq;

        subreq = &rctx->subreq;

        while (!err && rctx->left) {
                err = pre_crypt(req) ?:
                      crypto_skcipher_decrypt(subreq) ?:
                      post_crypt(req);

                if (err == -EINPROGRESS || err == -EBUSY)
                        return err;
        }

        exit_crypt(req);
        return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
        struct skcipher_request *req = areq->data;
        struct skcipher_request *subreq;
        struct rctx *rctx;

        rctx = skcipher_request_ctx(req);

        if (err == -EINPROGRESS) {
                if (rctx->left != req->cryptlen)
                        return;
                goto out;
        }

        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

        err = do_decrypt(req, err ?: post_crypt(req));
        if (rctx->left)
                return;

out:
        skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
        return do_decrypt(req, init_crypt(req, decrypt_done));
}
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
              struct scatterlist *ssrc, unsigned int nbytes,
              struct lrw_crypt_req *req)
{
        const unsigned int bsize = LRW_BLOCK_SIZE;
        const unsigned int max_blks = req->tbuflen / bsize;
        struct lrw_table_ctx *ctx = req->table_ctx;
        struct blkcipher_walk walk;
        unsigned int nblocks;
        be128 *iv, *src, *dst, *t;
        be128 *t_buf = req->tbuf;
        int err, i;

        BUG_ON(max_blks < 1);

        blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

        err = blkcipher_walk_virt(desc, &walk);
        nbytes = walk.nbytes;
        if (!nbytes)
                return err;

        nblocks = min(walk.nbytes / bsize, max_blks);
        src = (be128 *)walk.src.virt.addr;
        dst = (be128 *)walk.dst.virt.addr;

        /* calculate first value of T */
        iv = (be128 *)walk.iv;
        t_buf[0] = *iv;

        /* T <- I*Key2 */
        gf128mul_64k_bbe(&t_buf[0], ctx->table);

        i = 0;
        goto first;

        for (;;) {
                do {
                        for (i = 0; i < nblocks; i++) {
                                /* T <- I*Key2, using the optimization
                                 * discussed in the specification */
                                be128_xor(&t_buf[i], t,
                                          &ctx->mulinc[get_index128(iv)]);
                                inc(iv);
first:
                                t = &t_buf[i];

                                /* PP <- T xor P */
                                be128_xor(dst + i, t, src + i);
                        }

                        /* CC <- E(Key1,PP) */
                        req->crypt_fn(req->crypt_ctx, (u8 *)dst,
                                      nblocks * bsize);

                        /* C <- T xor CC */
                        for (i = 0; i < nblocks; i++)
                                be128_xor(dst + i, dst + i, &t_buf[i]);

                        src += nblocks;
                        dst += nblocks;
                        nbytes -= nblocks * bsize;
                        nblocks = min(nbytes / bsize, max_blks);
                } while (nblocks > 0);

                err = blkcipher_walk_done(desc, &walk, nbytes);
                nbytes = walk.nbytes;
                if (!nbytes)
                        break;

                nblocks = min(nbytes / bsize, max_blks);
                src = (be128 *)walk.src.virt.addr;
                dst = (be128 *)walk.dst.virt.addr;
        }

        return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);
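
/*
 * lrw_crypt() is the legacy blkcipher-based helper: callers supply their
 * own bulk encryption callback (crypt_fn) and a preallocated tweak
 * buffer (tbuf) of tbuflen bytes, which lets architecture-specific
 * implementations process several blocks per call while reusing the
 * tables precomputed in struct lrw_table_ctx.
 */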
static int init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
        struct priv *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *cipher;

        cipher = crypto_spawn_skcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;

        crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
                                         sizeof(struct rctx));

        return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
        struct priv *ctx = crypto_skcipher_ctx(tfm);

        lrw_free_table(&ctx->table);
        crypto_free_skcipher(ctx->child);
}

static void free(struct skcipher_instance *inst)
{
        crypto_drop_skcipher(skcipher_instance_ctx(inst));
        kfree(inst);
}
static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_skcipher_spawn *spawn;
        struct skcipher_instance *inst;
        struct crypto_attr_type *algt;
        struct skcipher_alg *alg;
        const char *cipher_name;
        char ecb_name[CRYPTO_MAX_ALG_NAME];
        int err;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
                return -EINVAL;

        cipher_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(cipher_name))
                return PTR_ERR(cipher_name);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        spawn = skcipher_instance_ctx(inst);

        crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
        err = crypto_grab_skcipher(spawn, cipher_name, 0,
                                   crypto_requires_sync(algt->type,
                                                        algt->mask));
        if (err == -ENOENT) {
                err = -ENAMETOOLONG;
                if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
                             cipher_name) >= CRYPTO_MAX_ALG_NAME)
                        goto err_free_inst;

                err = crypto_grab_skcipher(spawn, ecb_name, 0,
                                           crypto_requires_sync(algt->type,
                                                                algt->mask));
        }

        if (err)
                goto err_free_inst;

        alg = crypto_skcipher_spawn_alg(spawn);

        err = -EINVAL;
        if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
                goto err_drop_spawn;

        if (crypto_skcipher_alg_ivsize(alg))
                goto err_drop_spawn;

        err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
                                  &alg->base);
        if (err)
                goto err_drop_spawn;

        err = -EINVAL;
        cipher_name = alg->base.cra_name;

        /* Alas we screwed up the naming so we have to mangle the
         * cipher name.
         */
        if (!strncmp(cipher_name, "ecb(", 4)) {
                unsigned len;

                len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
                if (len < 2 || len >= sizeof(ecb_name))
                        goto err_drop_spawn;

                if (ecb_name[len - 1] != ')')
                        goto err_drop_spawn;

                ecb_name[len - 1] = 0;

                if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                             "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
                        err = -ENAMETOOLONG;
                        goto err_drop_spawn;
                }
        } else
                goto err_drop_spawn;

        inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.base.cra_priority = alg->base.cra_priority;
        inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
        inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
                                       (__alignof__(u64) - 1);

        inst->alg.ivsize = LRW_BLOCK_SIZE;
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
                                LRW_BLOCK_SIZE;
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
                                LRW_BLOCK_SIZE;

        inst->alg.base.cra_ctxsize = sizeof(struct priv);

        inst->alg.init = init_tfm;
        inst->alg.exit = exit_tfm;

        inst->alg.setkey = setkey;
        inst->alg.encrypt = encrypt;
        inst->alg.decrypt = decrypt;

        inst->free = free;

        err = skcipher_register_instance(tmpl, inst);
        if (err)
                goto err_drop_spawn;

out:
        return err;

err_drop_spawn:
        crypto_drop_skcipher(spawn);
err_free_inst:
        kfree(inst);
        goto out;
}
static struct crypto_template crypto_tmpl = {
        .name = "lrw",
        .create = create,
        .module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
        return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
        crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);
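
/*
 * Minimal usage sketch (illustrative only; error handling elided, and a
 * real caller would wait for async completion, e.g. via crypto_wait_req()):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	struct scatterlist sg;
 *	u8 key[32], iv[16], data[64];
 *
 *	tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, sizeof(key)); // AES-128 key + Key2
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, sizeof(data));
 *	skcipher_request_set_crypt(req, &sg, &sg, sizeof(data), iv);
 *	crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */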
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");