 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"
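
/*
 * Block XOR helpers used by the CBC chaining code below; one for 64-bit
 * blocks and one for 128-bit blocks.
 */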
static inline void xor_64(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
}
static inline void xor_128(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
        ((u32 *)a)[2] ^= ((u32 *)b)[2];
        ((u32 *)a)[3] ^= ((u32 *)b)[3];
}
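
/*
 * Slow path: the current block straddles a page boundary, so bounce it
 * through an aligned on-stack buffer one block at a time.
 */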
static unsigned int crypt_slow(const struct cipher_desc *desc,
                               struct scatter_walk *in,
                               struct scatter_walk *out, unsigned int bsize)
{
        unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
        u8 buffer[bsize * 2 + alignmask];
        u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        u8 *dst = src + bsize;
        unsigned int n;

        n = scatterwalk_copychunks(src, in, bsize, 0);
        scatterwalk_advance(in, n);

        desc->prfn(desc, dst, src, bsize);

        n = scatterwalk_copychunks(dst, out, bsize, 1);
        scatterwalk_advance(out, n);

        return bsize;
}
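
/*
 * Fast path: process the mapped, contiguous region in place.  When tmp is
 * non-NULL the data is misaligned, so it is copied through the bounce page
 * instead.
 */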
static inline unsigned int crypt_fast(const struct cipher_desc *desc,
                                      struct scatter_walk *in,
                                      struct scatter_walk *out,
                                      unsigned int nbytes, u8 *tmp)
{
        u8 *src, *dst;

        src = in->data;
        dst = scatterwalk_samebuf(in, out) ? src : out->data;

        if (tmp) {
                memcpy(tmp, in->data, nbytes);
                src = tmp;
                dst = tmp;
        }

        nbytes = desc->prfn(desc, dst, src, nbytes);

        if (tmp)
                memcpy(out->data, tmp, nbytes);

        scatterwalk_advance(in, nbytes);
        scatterwalk_advance(out, nbytes);

        return nbytes;
}
/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 */
static int crypt(const struct cipher_desc *desc,
                 struct scatterlist *dst,
                 struct scatterlist *src,
                 unsigned int nbytes)
{
        struct scatter_walk walk_in, walk_out;
        struct crypto_tfm *tfm = desc->tfm;
        const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
        unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
        unsigned long buffer = 0;

        if (!nbytes)
                return 0;

        if (nbytes % bsize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return -EINVAL;
        }

        scatterwalk_start(&walk_in, src);
        scatterwalk_start(&walk_out, dst);

        for (;;) {
                unsigned int n = nbytes;
                u8 *tmp = NULL;

                if (!scatterwalk_aligned(&walk_in, alignmask) ||
                    !scatterwalk_aligned(&walk_out, alignmask)) {
                        if (!buffer)
                                buffer = __get_free_page(GFP_ATOMIC);
                        if (!buffer)
                                n = 0;
                        tmp = (u8 *)buffer;
                }

                scatterwalk_map(&walk_in, 0);
                scatterwalk_map(&walk_out, 1);

                n = scatterwalk_clamp(&walk_in, n);
                n = scatterwalk_clamp(&walk_out, n);

                if (likely(n >= bsize))
                        n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
                else
                        n = crypt_slow(desc, &walk_in, &walk_out, bsize);

                nbytes -= n;

                scatterwalk_done(&walk_in, 0, nbytes);
                scatterwalk_done(&walk_out, 1, nbytes);

                if (!nbytes)
                        break;

                crypto_yield(tfm);
        }

        if (buffer)
                free_page(buffer);

        return 0;
}
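
/*
 * If the caller's IV does not satisfy the algorithm's alignment mask, copy
 * it into an aligned stack buffer for the duration of the operation and
 * copy the updated IV back afterwards.
 */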
static int crypt_iv_unaligned(struct cipher_desc *desc,
                              struct scatterlist *dst,
                              struct scatterlist *src,
                              unsigned int nbytes)
{
        struct crypto_tfm *tfm = desc->tfm;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
        u8 *iv = desc->info;

        if (unlikely(((unsigned long)iv & alignmask))) {
                unsigned int ivsize = tfm->crt_cipher.cit_ivsize;
                u8 buffer[ivsize + alignmask];
                u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
                int err;

                desc->info = memcpy(tmp, iv, ivsize);
                err = crypt(desc, dst, src, nbytes);
                memcpy(iv, tmp, ivsize);

                return err;
        }

        return crypt(desc, dst, src, nbytes);
}
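
/*
 * Generic CBC encryption: XOR each plaintext block into the IV, encrypt the
 * result, and carry the ciphertext forward as the next IV.
 */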
static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
                                        u8 *dst, const u8 *src,
                                        unsigned int nbytes)
{
        struct crypto_tfm *tfm = desc->tfm;
        void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
        int bsize = crypto_tfm_alg_blocksize(tfm);

        void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
        u8 *iv = desc->info;
        unsigned int done = 0;

        nbytes -= bsize;

        do {
                xor(iv, src);
                fn(crypto_tfm_ctx(tfm), dst, iv);
                memcpy(iv, dst, bsize);

                src += bsize;
                dst += bsize;
        } while ((done += bsize) <= nbytes);

        return done;
}
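
/*
 * Generic CBC decryption.  When decrypting in place (src == dst) each block
 * is decrypted into an aligned stack buffer first, so that the ciphertext
 * still needed as the next block's IV is not overwritten prematurely.
 */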
static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
                                        u8 *dst, const u8 *src,
                                        unsigned int nbytes)
{
        struct crypto_tfm *tfm = desc->tfm;
        void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
        int bsize = crypto_tfm_alg_blocksize(tfm);
        unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);

        u8 stack[src == dst ? bsize + alignmask : 0];
        u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
        u8 **dst_p = src == dst ? &buf : &dst;

        void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
        u8 *iv = desc->info;
        unsigned int done = 0;

        nbytes -= bsize;

        do {
                u8 *tmp_dst = *dst_p;

                fn(crypto_tfm_ctx(tfm), tmp_dst, src);
                xor(tmp_dst, iv);
                memcpy(iv, src, bsize);
                if (tmp_dst != dst)
                        memcpy(dst, tmp_dst, bsize);

                src += bsize;
                dst += bsize;
        } while ((done += bsize) <= nbytes);

        return done;
}
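
/* Generic ECB: run the block function over each block independently. */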
static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
                                const u8 *src, unsigned int nbytes)
{
        struct crypto_tfm *tfm = desc->tfm;
        int bsize = crypto_tfm_alg_blocksize(tfm);
        void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
        unsigned int done = 0;

        nbytes -= bsize;

        do {
                fn(crypto_tfm_ctx(tfm), dst, src);

                src += bsize;
                dst += bsize;
        } while ((done += bsize) <= nbytes);

        return done;
}
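
/*
 * Check the key length against the algorithm's declared limits before
 * passing the key down to the algorithm's own setkey routine.
 */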
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;

        if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
                               &tfm->crt_flags);
}
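
/*
 * Mode entry points: each builds a cipher_desc around the algorithm's block
 * functions (preferring a mode-aware cia_*_ecb/cia_*_cbc helper when the
 * algorithm provides one) and hands the scatterlists to crypt().
 */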
static int ecb_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct cipher_desc desc;
        struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

        desc.tfm = tfm;
        desc.crfn = cipher->cia_encrypt;
        desc.prfn = cipher->cia_encrypt_ecb ?: ecb_process;

        return crypt(&desc, dst, src, nbytes);
}
static int ecb_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        struct cipher_desc desc;
        struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

        desc.tfm = tfm;
        desc.crfn = cipher->cia_decrypt;
        desc.prfn = cipher->cia_decrypt_ecb ?: ecb_process;

        return crypt(&desc, dst, src, nbytes);
}
static int cbc_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        struct cipher_desc desc;
        struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

        desc.tfm = tfm;
        desc.crfn = cipher->cia_encrypt;
        desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
        desc.info = tfm->crt_cipher.cit_iv;

        return crypt(&desc, dst, src, nbytes);
}
static int cbc_encrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        struct cipher_desc desc;
        struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

        desc.tfm = tfm;
        desc.crfn = cipher->cia_encrypt;
        desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
        desc.info = iv;

        return crypt_iv_unaligned(&desc, dst, src, nbytes);
}
static int cbc_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        struct cipher_desc desc;
        struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

        desc.tfm = tfm;
        desc.crfn = cipher->cia_decrypt;
        desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
        desc.info = tfm->crt_cipher.cit_iv;

        return crypt(&desc, dst, src, nbytes);
}
static int cbc_decrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        struct cipher_desc desc;
        struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

        desc.tfm = tfm;
        desc.crfn = cipher->cia_decrypt;
        desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
        desc.info = iv;

        return crypt_iv_unaligned(&desc, dst, src, nbytes);
}
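
/* Stubs for modes (CFB, CTR) that have no generic implementation yet. */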
static int nocrypt(struct crypto_tfm *tfm,
                   struct scatterlist *dst,
                   struct scatterlist *src,
                   unsigned int nbytes)
{
        return -ENOSYS;
}

static int nocrypt_iv(struct crypto_tfm *tfm,
                      struct scatterlist *dst,
                      struct scatterlist *src,
                      unsigned int nbytes, u8 *iv)
{
        return -ENOSYS;
}
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
        u32 mode = flags & CRYPTO_TFM_MODE_MASK;

        tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;

        return 0;
}
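
/*
 * Select the cit_* handlers for the requested mode.  For CBC, also pick the
 * block-size-specific XOR helper and place the IV buffer immediately after
 * the aligned algorithm context.
 */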
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
        int ret = 0;
        struct cipher_tfm *ops = &tfm->crt_cipher;

        ops->cit_setkey = setkey;

        switch (tfm->crt_cipher.cit_mode) {
        case CRYPTO_TFM_MODE_ECB:
                ops->cit_encrypt = ecb_encrypt;
                ops->cit_decrypt = ecb_decrypt;
                break;

        case CRYPTO_TFM_MODE_CBC:
                ops->cit_encrypt = cbc_encrypt;
                ops->cit_decrypt = cbc_decrypt;
                ops->cit_encrypt_iv = cbc_encrypt_iv;
                ops->cit_decrypt_iv = cbc_decrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CFB:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CTR:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        default:
                BUG();
        }

        if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
                unsigned long align;
                unsigned long addr;

                switch (crypto_tfm_alg_blocksize(tfm)) {
                case 8:
                        ops->cit_xor_block = xor_64;
                        break;

                case 16:
                        ops->cit_xor_block = xor_128;
                        break;

                default:
                        printk(KERN_WARNING "%s: block size %u not supported\n",
                               crypto_tfm_alg_name(tfm),
                               crypto_tfm_alg_blocksize(tfm));
                        ret = -EINVAL;
                        goto out;
                }

                ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
                align = crypto_tfm_alg_alignmask(tfm) + 1;
                addr = (unsigned long)crypto_tfm_ctx(tfm);
                addr = ALIGN(addr, align);
                addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
                ops->cit_iv = (void *)addr;
        }

out:
        return ret;
}
void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
}