/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"
#include "scatterwalk.h"
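
/*
 * Walk flags (descriptive summary added for clarity; the walker code
 * below is authoritative):
 *
 * PHYS - the caller wants physical page/offset pairs rather than
 *        kmapped virtual addresses.
 * SLOW - the chunk is smaller than a full block, so it is bounced
 *        through an aligned temporary buffer.
 * COPY - src/dst are misaligned, so data is staged through walk->page.
 * DIFF - src and dst were mapped separately and both need unmapping.
 */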
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr, 1);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
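
/* Worked example (added for clarity): with 4 KiB pages, start = 0xff8
 * and len = 16 would straddle the boundary at 0x1000, so 0x1000 is
 * returned; a span that already fits within one page returns start
 * unchanged.
 */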
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return start > end_page ? start : end_page;
}
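
/*
 * Slow-path completion (comment added): the processed block sits in the
 * aligned bounce buffer, so copy it back out to the destination
 * scatterlist.
 */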
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}
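
/*
 * Fast-path completion (comment added): n is the number of bytes the
 * caller left unprocessed; flush the staging page in the COPY case,
 * drop any mappings, and advance both scatterlist walks by the number
 * of bytes actually processed.
 */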
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	n = walk->nbytes - n;

	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		blkcipher_unmap_src(walk);
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}
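
/*
 * Finish the current walk step: err carries the number of bytes the
 * caller left unprocessed in this chunk (or a negative error). Write
 * back slow-path data, advance the walk, and map the next chunk if any
 * bytes remain. (Comment added for clarity.)
 */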
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int bsize = crypto_blkcipher_blocksize(tfm);
		unsigned int n;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, err);
		else
			n = blkcipher_done_slow(tfm, walk, bsize);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
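
/*
 * Typical use of the walker by a cipher mode implementation (sketch
 * added for illustration; process_blocks() is a hypothetical helper
 * that handles whole blocks and returns the leftover byte count, not
 * part of this API):
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while ((nbytes = walk.nbytes)) {
 *		nbytes = process_blocks(desc, &walk);
 *		err = blkcipher_walk_done(desc, &walk, nbytes);
 *	}
 *	return err;
 */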

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
						 bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}
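
/*
 * COPY case (comment added): stage the source data through the
 * preallocated page so the cipher sees aligned buffers; the result is
 * flushed back out in blkcipher_done_fast().
 */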
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}
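
/*
 * Fast case (comment added): operate directly on the scatterlist pages;
 * the destination is mapped separately only when it differs from the
 * source.
 */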
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}
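
/*
 * Pick the strategy for the next chunk (comment added): error out if
 * fewer bytes than one block remain, bounce a single block if the
 * scatterlist fragments are too small, stage through a page copy on
 * misalignment, or map the pages directly.
 */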
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize = crypto_blkcipher_blocksize(tfm);
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < bsize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}

	return err;
}
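
/*
 * The IV supplied in desc->info may be misaligned for this tfm; if so,
 * copy it to an aligned spot inside walk->buffer that does not straddle
 * a page. (Comment added for clarity.)
 */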
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = crypto_blkcipher_blocksize(tfm);
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
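
/*
 * Entry points (comment added): blkcipher_walk_virt maps chunks to
 * virtual addresses, while blkcipher_walk_phys yields physical
 * page/offset pairs instead.
 */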
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}
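
/*
 * Key material may not satisfy the algorithm's alignment mask; bounce
 * it through a temporary buffer if needed and wipe the copy afterwards.
 * (Comment added for clarity.)
 */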
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}
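
/*
 * These wrappers expose the synchronous blkcipher through the async
 * (ablkcipher) interface by building a blkcipher_desc from the request.
 * (Comment added for clarity.)
 */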
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}
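
/*
 * For a synchronous user, the IV is stored directly behind the tfm
 * context, so suitably aligned extra room is reserved here.
 * (Comment added for clarity.)
 */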
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if ((type & mask) && cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if (type & mask)
		return crypto_init_blkcipher_ops_sync(tfm);

	return crypto_init_blkcipher_ops_async(tfm);
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");