/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
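/*
 * A cipher mode implementation typically drives the walk interface like
 * this (illustrative sketch only; encrypt_segment() is a hypothetical
 * helper standing in for the mode's real per-chunk processing):
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *
 *	while ((nbytes = walk.nbytes)) {
 *		nbytes = encrypt_segment(desc, &walk, nbytes);
 *		err = blkcipher_walk_done(desc, &walk, nbytes);
 *	}
 *
 *	return err;
 *
 * Each iteration sees walk.src.virt.addr and walk.dst.virt.addr mapped for
 * walk.nbytes bytes; the value passed back to blkcipher_walk_done() is the
 * number of bytes left unprocessed in that chunk (usually 0).
 */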
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"
#include "scatterwalk.h"
/* Internal state flags kept in blkcipher_walk.flags. */
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,	/* caller wants physical page/offset pairs */
	BLKCIPHER_WALK_SLOW = 1 << 1,	/* block assembled in an aligned bounce buffer */
	BLKCIPHER_WALK_COPY = 1 << 2,	/* chunk bounced through the scratch page */
	BLKCIPHER_WALK_DIFF = 1 << 3,	/* source and destination mapped separately */
};
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr, 1);
}
/*
 * Pick a spot of length len that does not straddle a page boundary: if
 * [start, start + len) would cross into the next page, return the start
 * of that page instead, otherwise return start unchanged.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	if (offset_in_page(start + len) < len)
		return (u8 *)((unsigned long)(start + len) & PAGE_MASK);
	return start;
}
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	/* n arrives as the number of leftover bytes; convert to bytes done. */
	n = walk->nbytes - n;

	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		blkcipher_unmap_src(walk);
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}
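/*
 * Complete the current chunk of a walk.  @err is either a negative error
 * code or the number of bytes at the end of this chunk that the caller
 * left unprocessed.  If data remains, the next chunk is set up; otherwise
 * the (possibly bounced) IV is copied back and scratch memory is released.
 */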
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int bsize = crypto_blkcipher_blocksize(tfm);
		unsigned int n;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, err);
		else
			n = blkcipher_done_slow(tfm, walk, bsize);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	/* Walk finished: propagate the IV and release scratch memory. */
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
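/*
 * Slow path: fewer than one full block is contiguously mappable, so the
 * block is assembled in an aligned bounce buffer and processed there;
 * blkcipher_done_slow() copies the result back out afterwards.
 */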
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
						 bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}
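/*
 * Copy path: source or destination is not suitably aligned, so the chunk
 * is bounced through the scratch page and the cipher sees aligned buffers.
 */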
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}
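/*
 * Fast path: operate directly on the scatterlist pages.  The destination
 * is mapped separately only when it differs from the source.
 */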
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}
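/*
 * Set up the next chunk of the walk: choose the fast, copy or slow path
 * depending on alignment and on how much contiguous data remains in the
 * current scatterlist entries.
 */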
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize = crypto_blkcipher_blocksize(tfm);
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < bsize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}
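/*
 * The IV handed in via desc->info may violate the algorithm's alignment
 * requirement; copy it into a suitably aligned buffer for the walk.
 */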
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = crypto_blkcipher_blocksize(tfm);
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
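/*
 * Start a walk that yields kernel virtual addresses (blkcipher_walk_virt)
 * or physical page/offset pairs (blkcipher_walk_phys) for each chunk.
 */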
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}
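/* Validate the key length against the algorithm's limits before delegating. */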
static int setkey(struct crypto_tfm *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	return cipher->setkey(tfm, key, keylen);
}
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}
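/*
 * The asynchronous entry points wrap the synchronous blkcipher operations
 * in an on-stack descriptor built from the request, so completion here is
 * always synchronous.
 */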
static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}
static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}
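/*
 * Transforms used through the synchronous interface keep their IV in the
 * tfm context, so suitably aligned extra room is reserved for it here.
 */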
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if ((type & mask) && cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}
static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->ivsize = alg->ivsize;

	return 0;
}
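/*
 * The synchronous interface stores its IV right after the (aligned)
 * algorithm context; compute that address and publish it via crt->iv.
 */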
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if (type & mask)
		return crypto_init_blkcipher_ops_sync(tfm);

	return crypto_init_blkcipher_ops_async(tfm);
}
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
}
const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");