/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c, indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

static const char *skcipher_default_geniv __read_mostly;

struct ablkcipher_buffer {
        struct list_head        entry;
        struct scatter_walk     dst;
        unsigned int            len;
        void                    *data;
};

enum {
        ABLKCIPHER_WALK_SLOW = 1 << 0,
};
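
/*
 * ABLKCIPHER_WALK_SLOW marks a chunk that has to be bounced through an
 * aligned temporary buffer because the scatterlist segments are too short
 * or misaligned to be processed in place; see ablkcipher_next_slow()
 * below.
 */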

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
        scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        struct ablkcipher_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                ablkcipher_buffer_write(p);
                list_del(&p->entry);
                kfree(p);
        }
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
                                          struct ablkcipher_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}
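
/*
 * For example, with 4 KiB pages: for start == 0xff0 and len == 0x20 the
 * last byte would land at 0x100f, so end_page is 0x1000 and the returned
 * spot is the beginning of the following page.  When the range fits within
 * the current page, end_page <= start and start is returned unchanged.
 */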

static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
                                                unsigned int bsize)
{
        unsigned int n = bsize;

        for (;;) {
                unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

                if (len_this_page > n)
                        len_this_page = n;
                scatterwalk_advance(&walk->out, n);
                if (n == len_this_page)
                        break;
                n -= len_this_page;
                scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
        }

        return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
                                                unsigned int n)
{
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk);
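
/*
 * Finish the current chunk of a walk.  As the code below implies, @err is
 * either a negative errno from the caller or the number of bytes of the
 * chunk left unprocessed (0 when the chunk was consumed in full); a
 * slow-path chunk must always be consumed in full.
 */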
int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int nbytes = 0;

        if (likely(err >= 0)) {
                unsigned int n = walk->nbytes - err;

                if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
                        n = ablkcipher_done_fast(walk, n);
                else if (WARN_ON(err)) {
                        err = -EINVAL;
                        goto err;
                } else
                        n = ablkcipher_done_slow(walk, n);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

err:
        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(req->base.flags);
                return ablkcipher_walk_next(req, walk);
        }

        if (walk->iv != req->info)
                memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
        kfree(walk->iv_buffer);

        return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk,
                                       unsigned int bsize,
                                       unsigned int alignmask,
                                       void **src_p, void **dst_p)
{
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
        struct ablkcipher_buffer *p;
        void *src, *dst, *base;
        unsigned int n;

        n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
        n += (aligned_bsize * 3 - (alignmask + 1) +
              (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
                return ablkcipher_walk_done(req, walk, -ENOMEM);

        base = p + 1;

        dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
        src = dst = ablkcipher_get_spot(dst, bsize);

        p->len = bsize;
        p->data = dst;

        scatterwalk_copychunks(src, &walk->in, bsize, 0);

        ablkcipher_queue_write(walk, p);

        walk->nbytes = bsize;
        walk->flags |= ABLKCIPHER_WALK_SLOW;

        *src_p = src;
        *dst_p = dst;

        return 0;
}

static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
                                     struct crypto_tfm *tfm,
                                     unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->iv_buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}
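
/*
 * The sizing arithmetic above leaves room for two aligned block-sized
 * scratch areas followed by the IV itself, each placed with
 * ablkcipher_get_spot() so that none of them straddles a page boundary;
 * walk->iv ends up pointing at an aligned copy of the caller's IV.
 */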

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk)
{
        walk->src.page = scatterwalk_page(&walk->in);
        walk->src.offset = offset_in_page(walk->in.offset);
        walk->dst.page = scatterwalk_page(&walk->out);
        walk->dst.offset = offset_in_page(walk->out.offset);

        return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask, bsize, n;
        void *src, *dst;
        int err;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        n = walk->total;
        if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
                req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return ablkcipher_walk_done(req, walk, -EINVAL);
        }

        walk->flags &= ~ABLKCIPHER_WALK_SLOW;
        src = dst = NULL;

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (n < bsize ||
            !scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                err = ablkcipher_next_slow(req, walk, bsize, alignmask,
                                           &src, &dst);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
        if (err >= 0) {
                walk->src.page = virt_to_page(src);
                walk->dst.page = virt_to_page(dst);
                walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
                walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
        }

        return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
                                 struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->iv_buffer = NULL;
        walk->iv = req->info;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = ablkcipher_copy_iv(walk, tfm, alignmask);

                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);

        return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk)
{
        walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
        return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
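
/*
 * Typical driver usage (a minimal sketch: encrypt_chunk() stands in for a
 * hypothetical per-chunk cipher routine, and error handling is elided):
 *
 *      struct ablkcipher_walk walk;
 *      int err;
 *
 *      ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *      err = ablkcipher_walk_phys(req, &walk);
 *      while (!err && walk.nbytes) {
 *              encrypt_chunk(walk.src.page, walk.src.offset,
 *                            walk.dst.page, walk.dst.offset, walk.nbytes);
 *              err = ablkcipher_walk_done(req, &walk, 0);
 *      }
 */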

static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                  unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                              u32 mask)
{
        return alg->cra_ctxsize;
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
        return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
        return crypto_ablkcipher_decrypt(&req->creq);
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        if (!alg->ivsize) {
                crt->givencrypt = skcipher_null_givencrypt;
                crt->givdecrypt = skcipher_null_givdecrypt;
        }
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : ablkcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_ablkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
        return -ENOSYS;
}

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
                                     u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
                      alg->setkey : setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->givencrypt = alg->givencrypt;
        crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : givcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_givcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

const char *crypto_default_geniv(const struct crypto_alg *alg)
{
        if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
             CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                         alg->cra_ablkcipher.ivsize) !=
            alg->cra_blocksize)
                return "chainiv";

        return alg->cra_flags & CRYPTO_ALG_ASYNC ?
               "eseqiv" : skcipher_default_geniv;
}
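
/*
 * In other words: a cipher whose IV size differs from its block size gets
 * the chaining IV generator, while block-sized IVs get eseqiv on
 * asynchronous implementations and whatever skcipher_module_init() chose
 * for synchronous ones (eseqiv on SMP, chainiv on UP).
 */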

static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
        struct rtattr *tb[3];
        struct {
                struct rtattr attr;
                struct crypto_attr_type data;
        } ptype;
        struct {
                struct rtattr attr;
                struct crypto_attr_alg data;
        } palg;
        struct crypto_template *tmpl;
        struct crypto_instance *inst;
        struct crypto_alg *larval;
        const char *geniv;
        int err;

        larval = crypto_larval_lookup(alg->cra_driver_name,
                                      (type & ~CRYPTO_ALG_TYPE_MASK) |
                                      CRYPTO_ALG_TYPE_GIVCIPHER,
                                      mask | CRYPTO_ALG_TYPE_MASK);
        err = PTR_ERR(larval);
        if (IS_ERR(larval))
                goto out;

        err = -EAGAIN;
        if (!crypto_is_larval(larval))
                goto drop_larval;

        ptype.attr.rta_len = sizeof(ptype);
        ptype.attr.rta_type = CRYPTOA_TYPE;
        ptype.data.type = type | CRYPTO_ALG_GENIV;
        /* GENIV tells the template that we're making a default geniv. */
        ptype.data.mask = mask | CRYPTO_ALG_GENIV;
        tb[0] = &ptype.attr;

        palg.attr.rta_len = sizeof(palg);
        palg.attr.rta_type = CRYPTOA_ALG;
        /* Must use the exact name to locate ourselves. */
        memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
        tb[1] = &palg.attr;

        tb[2] = NULL;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_BLKCIPHER)
                geniv = alg->cra_blkcipher.geniv;
        else
                geniv = alg->cra_ablkcipher.geniv;

        if (!geniv)
                geniv = crypto_default_geniv(alg);

        tmpl = crypto_lookup_template(geniv);
        err = -ENOENT;
        if (!tmpl)
                goto kill_larval;

        inst = tmpl->alloc(tb);
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto put_tmpl;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                tmpl->free(inst);
                goto put_tmpl;
        }

        /* Redo the lookup to use the instance we just registered. */
        err = -EAGAIN;

put_tmpl:
        crypto_tmpl_put(tmpl);
kill_larval:
        crypto_larval_kill(larval);
drop_larval:
        crypto_mod_put(larval);
out:
        crypto_mod_put(alg);
        return err;
}

static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
                                                 u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_alg_mod_lookup(name, type, mask);
        if (IS_ERR(alg))
                return alg;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_GIVCIPHER)
                return alg;

        if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
              CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                          alg->cra_ablkcipher.ivsize))
                return alg;

        crypto_mod_put(alg);
        alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
                                    mask & ~CRYPTO_ALG_TESTED);
        if (IS_ERR(alg))
                return alg;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_GIVCIPHER) {
                if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
                        crypto_mod_put(alg);
                        alg = ERR_PTR(-ENOENT);
                }
                return alg;
        }

        BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
                 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                             alg->cra_ablkcipher.ivsize));

        return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
                         u32 type, u32 mask)
{
        struct crypto_alg *alg;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask);

        alg = crypto_lookup_skcipher(name, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
        crypto_mod_put(alg);
        return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
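
/*
 * Typical use from a template's alloc() hook (a minimal sketch; "inst",
 * "name" and "algt" are the caller's instance, algorithm parameter and
 * type attributes):
 *
 *      struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst);
 *
 *      crypto_set_skcipher_spawn(spawn, inst);
 *      err = crypto_grab_skcipher(spawn, name, 0,
 *                                 crypto_requires_sync(algt->type,
 *                                                      algt->mask));
 */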

struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        struct crypto_tfm *tfm;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask);

        for (;;) {
                struct crypto_alg *alg;

                alg = crypto_lookup_skcipher(alg_name, type, mask);
                if (IS_ERR(alg)) {
                        err = PTR_ERR(alg);
                        goto err;
                }

                tfm = __crypto_alloc_tfm(alg, type, mask);
                if (!IS_ERR(tfm))
                        return __crypto_ablkcipher_cast(tfm);

                crypto_mod_put(alg);
                err = PTR_ERR(tfm);

err:
                if (err != -EAGAIN)
                        break;
                if (signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
        }

        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
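
/*
 * Example (a minimal sketch; the algorithm name, key and length are
 * illustrative):
 *
 *      struct crypto_ablkcipher *tfm;
 *      int err;
 *
 *      tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      err = crypto_ablkcipher_setkey(tfm, key, keylen);
 *      ...
 *      crypto_free_ablkcipher(tfm);
 */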

static int __init skcipher_module_init(void)
{
        skcipher_default_geniv = num_possible_cpus() > 1 ?
                                 "eseqiv" : "chainiv";
        return 0;
}

static void skcipher_module_exit(void)
{
}

module_init(skcipher_module_init);
module_exit(skcipher_module_exit);