crypto/cipher.c
/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"
static inline void xor_64(u8 *a, const u8 *b)
{
	((u32 *)a)[0] ^= ((u32 *)b)[0];
	((u32 *)a)[1] ^= ((u32 *)b)[1];
}

static inline void xor_128(u8 *a, const u8 *b)
{
	((u32 *)a)[0] ^= ((u32 *)b)[0];
	((u32 *)a)[1] ^= ((u32 *)b)[1];
	((u32 *)a)[2] ^= ((u32 *)b)[2];
	((u32 *)a)[3] ^= ((u32 *)b)[3];
}
static unsigned int crypt_slow(const struct cipher_desc *desc,
			       struct scatter_walk *in,
			       struct scatter_walk *out, unsigned int bsize)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
	u8 buffer[bsize * 2 + alignmask];
	u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	u8 *dst = src + bsize;
	unsigned int n;

	n = scatterwalk_copychunks(src, in, bsize, 0);
	scatterwalk_advance(in, n);

	desc->prfn(desc, dst, src, bsize);

	n = scatterwalk_copychunks(dst, out, bsize, 1);
	scatterwalk_advance(out, n);

	return bsize;
}
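
/*
 * Stand-alone sketch (not part of the original file): the pointer-alignment
 * trick crypt_slow() relies on.  The buffer is over-allocated by alignmask
 * bytes and the working pointer is rounded up to the next (alignmask + 1)
 * boundary.  ALIGN() is redefined locally so this compiles in user space;
 * the mask and block size are made-up example values.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	unsigned long alignmask = 15;	/* cipher asks for 16-byte alignment */
	unsigned int bsize = 16;	/* cipher block size */
	uint8_t buffer[16 * 2 + 15];	/* bsize * 2 + alignmask, as in crypt_slow() */
	uint8_t *src = (uint8_t *)ALIGN((uintptr_t)buffer, alignmask + 1);
	uint8_t *dst = src + bsize;	/* second aligned block follows the first */

	printf("buffer=%p src=%p dst=%p\n", (void *)buffer, (void *)src, (void *)dst);
	return 0;
}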
static inline unsigned int crypt_fast(const struct cipher_desc *desc,
				      struct scatter_walk *in,
				      struct scatter_walk *out,
				      unsigned int nbytes, u8 *tmp)
{
	u8 *src, *dst;

	src = in->data;
	dst = scatterwalk_samebuf(in, out) ? src : out->data;

	if (tmp) {
		memcpy(tmp, in->data, nbytes);
		src = tmp;
		dst = tmp;
	}

	nbytes = desc->prfn(desc, dst, src, nbytes);

	if (tmp)
		memcpy(out->data, tmp, nbytes);

	scatterwalk_advance(in, nbytes);
	scatterwalk_advance(out, nbytes);

	return nbytes;
}
/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 */
static int crypt(const struct cipher_desc *desc,
		 struct scatterlist *dst,
		 struct scatterlist *src,
		 unsigned int nbytes)
{
	struct scatter_walk walk_in, walk_out;
	struct crypto_tfm *tfm = desc->tfm;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
	unsigned long buffer = 0;

	if (!nbytes)
		return 0;

	if (nbytes % bsize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	scatterwalk_start(&walk_in, src);
	scatterwalk_start(&walk_out, dst);

	for(;;) {
		unsigned int n = nbytes;
		u8 *tmp = NULL;

		if (!scatterwalk_aligned(&walk_in, alignmask) ||
		    !scatterwalk_aligned(&walk_out, alignmask)) {
			if (!buffer) {
				buffer = __get_free_page(GFP_ATOMIC);
				if (!buffer)
					n = 0;
			}
			tmp = (u8 *)buffer;
		}

		scatterwalk_map(&walk_in, 0);
		scatterwalk_map(&walk_out, 1);

		n = scatterwalk_clamp(&walk_in, n);
		n = scatterwalk_clamp(&walk_out, n);

		if (likely(n >= bsize))
			n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
		else
			n = crypt_slow(desc, &walk_in, &walk_out, bsize);

		nbytes -= n;

		scatterwalk_done(&walk_in, 0, nbytes);
		scatterwalk_done(&walk_out, 1, nbytes);

		if (!nbytes)
			break;

		crypto_yield(tfm);
	}

	if (buffer)
		free_page(buffer);

	return 0;
}
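
/*
 * Hedged usage sketch (not part of the original file): how a caller of the
 * legacy one-shot cipher API reaches crypt() above, on kernels of the
 * 2.6.12-2.6.18 era.  The scatterlist field layout shown here matches that
 * era only, and the module is illustrative rather than copy-paste ready.
 */
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init cipher_demo_init(void)
{
	u8 key[16] = { 0 };		/* all-zero demo key */
	struct scatterlist sg;
	struct crypto_tfm *tfm;
	u8 *buf;
	int err = -ENOMEM;

	tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_ECB);
	if (!tfm)
		return -ENOMEM;

	buf = kzalloc(32, GFP_KERNEL);	/* two 16-byte blocks, encrypted in place */
	if (!buf)
		goto out_free_tfm;

	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_buf;

	sg.page = virt_to_page(buf);
	sg.offset = offset_in_page(buf);
	sg.length = 32;

	/* ecb_encrypt() -> crypt() walks this scatterlist block by block */
	err = crypto_cipher_encrypt(tfm, &sg, &sg, 32);

out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_tfm(tfm);
	return err;
}

static void __exit cipher_demo_exit(void)
{
}

module_init(cipher_demo_init);
module_exit(cipher_demo_exit);
MODULE_LICENSE("GPL");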
static int crypt_iv_unaligned(struct cipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src,
			      unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	u8 *iv = desc->info;

	if (unlikely(((unsigned long)iv & alignmask))) {
		unsigned int ivsize = tfm->crt_cipher.cit_ivsize;
		u8 buffer[ivsize + alignmask];
		u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
		int err;

		desc->info = memcpy(tmp, iv, ivsize);
		err = crypt(desc, dst, src, nbytes);
		memcpy(iv, tmp, ivsize);

		return err;
	}

	return crypt(desc, dst, src, nbytes);
}
static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
	int bsize = crypto_tfm_alg_blocksize(tfm);

	void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
	u8 *iv = desc->info;
	unsigned int done = 0;

	do {
		xor(iv, src);
		fn(crypto_tfm_ctx(tfm), dst, iv);
		memcpy(iv, dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) < nbytes);

	return done;
}

static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
	int bsize = crypto_tfm_alg_blocksize(tfm);

	u8 stack[src == dst ? bsize : 0];
	u8 *buf = stack;
	u8 **dst_p = src == dst ? &buf : &dst;

	void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
	u8 *iv = desc->info;
	unsigned int done = 0;

	do {
		u8 *tmp_dst = *dst_p;

		fn(crypto_tfm_ctx(tfm), tmp_dst, src);
		xor(tmp_dst, iv);
		memcpy(iv, src, bsize);
		if (tmp_dst != dst)
			memcpy(dst, tmp_dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) < nbytes);

	return done;
}
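
/*
 * Stand-alone user-space toy (not part of the original file) mirroring the
 * CBC chaining in cbc_process_encrypt()/cbc_process_decrypt() above.  The
 * "block cipher" is a byte-wise NOT, which is worthless cryptographically;
 * it exists only so the chaining (xor, encrypt, carry the ciphertext into
 * the next IV) can be run and round-tripped outside the kernel.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BSIZE 8

static void toy_encrypt(uint8_t *dst, const uint8_t *src)
{
	for (int i = 0; i < BSIZE; i++)
		dst[i] = src[i] ^ 0xff;	/* stand-in for fn(crypto_tfm_ctx(tfm), dst, src) */
}

static void toy_decrypt(uint8_t *dst, const uint8_t *src)
{
	toy_encrypt(dst, src);		/* byte-wise NOT is its own inverse */
}

static void xor_block(uint8_t *a, const uint8_t *b)
{
	for (int i = 0; i < BSIZE; i++)
		a[i] ^= b[i];
}

int main(void)
{
	uint8_t iv[BSIZE] = "12345678";
	uint8_t pt[16] = "hello, cbc mode";
	uint8_t ct[16], out[16], civ[BSIZE];

	/* encrypt: iv ^= pt; ct = E(iv); iv = ct -- as in cbc_process_encrypt() */
	memcpy(civ, iv, BSIZE);
	for (int off = 0; off < 16; off += BSIZE) {
		xor_block(civ, pt + off);
		toy_encrypt(ct + off, civ);
		memcpy(civ, ct + off, BSIZE);
	}

	/* decrypt: pt = D(ct) ^ iv; iv = ct -- as in cbc_process_decrypt() */
	memcpy(civ, iv, BSIZE);
	for (int off = 0; off < 16; off += BSIZE) {
		toy_decrypt(out + off, ct + off);
		xor_block(out + off, civ);
		memcpy(civ, ct + off, BSIZE);
	}

	printf("CBC round trip %s\n", memcmp(pt, out, 16) ? "FAILED" : "ok");
	return 0;
}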
static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
				const u8 *src, unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	int bsize = crypto_tfm_alg_blocksize(tfm);
	void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
	unsigned int done = 0;

	do {
		fn(crypto_tfm_ctx(tfm), dst, src);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) < nbytes);

	return done;
}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;

	if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	} else
		return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
				       &tfm->crt_flags);
}
static int ecb_encrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_encrypt;
	desc.prfn = cipher->cia_encrypt_ecb ?: ecb_process;

	return crypt(&desc, dst, src, nbytes);
}

static int ecb_decrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_decrypt;
	desc.prfn = cipher->cia_decrypt_ecb ?: ecb_process;

	return crypt(&desc, dst, src, nbytes);
}

static int cbc_encrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_encrypt;
	desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
	desc.info = tfm->crt_cipher.cit_iv;

	return crypt(&desc, dst, src, nbytes);
}

static int cbc_encrypt_iv(struct crypto_tfm *tfm,
			  struct scatterlist *dst,
			  struct scatterlist *src,
			  unsigned int nbytes, u8 *iv)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_encrypt;
	desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
	desc.info = iv;

	return crypt_iv_unaligned(&desc, dst, src, nbytes);
}

static int cbc_decrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_decrypt;
	desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
	desc.info = tfm->crt_cipher.cit_iv;

	return crypt(&desc, dst, src, nbytes);
}

static int cbc_decrypt_iv(struct crypto_tfm *tfm,
			  struct scatterlist *dst,
			  struct scatterlist *src,
			  unsigned int nbytes, u8 *iv)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_decrypt;
	desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
	desc.info = iv;

	return crypt_iv_unaligned(&desc, dst, src, nbytes);
}
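
/*
 * Aside (not part of the original file): the wrappers above pick the bulk
 * helper with the GNU "a ?: b" extension, which yields a without evaluating
 * it twice when a is non-zero, and b otherwise.  A minimal gcc/clang
 * demonstration of the same fallback idiom used for desc.prfn:
 */
#include <stdio.h>

static int generic_process(int x)
{
	return x + 1;			/* plays the role of ecb_process/cbc_process_* */
}

int main(void)
{
	int (*optimized)(int) = NULL;	/* cipher provided no cia_*_ecb/cbc helper */
	int (*fn)(int) = optimized ?: generic_process;

	printf("%d\n", fn(41));		/* falls back to generic_process: prints 42 */
	return 0;
}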
static int nocrypt(struct crypto_tfm *tfm,
		   struct scatterlist *dst,
		   struct scatterlist *src,
		   unsigned int nbytes)
{
	return -ENOSYS;
}

static int nocrypt_iv(struct crypto_tfm *tfm,
		      struct scatterlist *dst,
		      struct scatterlist *src,
		      unsigned int nbytes, u8 *iv)
{
	return -ENOSYS;
}
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
	u32 mode = flags & CRYPTO_TFM_MODE_MASK;

	tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
	if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
		tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;

	return 0;
}
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
	int ret = 0;
	struct cipher_tfm *ops = &tfm->crt_cipher;

	ops->cit_setkey = setkey;

	switch (tfm->crt_cipher.cit_mode) {
	case CRYPTO_TFM_MODE_ECB:
		ops->cit_encrypt = ecb_encrypt;
		ops->cit_decrypt = ecb_decrypt;
		break;

	case CRYPTO_TFM_MODE_CBC:
		ops->cit_encrypt = cbc_encrypt;
		ops->cit_decrypt = cbc_decrypt;
		ops->cit_encrypt_iv = cbc_encrypt_iv;
		ops->cit_decrypt_iv = cbc_decrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CFB:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CTR:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	default:
		BUG();
	}

	if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
		unsigned long align;
		unsigned long addr;

		switch (crypto_tfm_alg_blocksize(tfm)) {
		case 8:
			ops->cit_xor_block = xor_64;
			break;

		case 16:
			ops->cit_xor_block = xor_128;
			break;

		default:
			printk(KERN_WARNING "%s: block size %u not supported\n",
			       crypto_tfm_alg_name(tfm),
			       crypto_tfm_alg_blocksize(tfm));
			ret = -EINVAL;
			goto out;
		}

		ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
		align = crypto_tfm_alg_alignmask(tfm) + 1;
		addr = (unsigned long)crypto_tfm_ctx(tfm);
		addr = ALIGN(addr, align);
		addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
		ops->cit_iv = (void *)addr;
	}

out:
	return ret;
}
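
/*
 * Stand-alone sketch (not part of the original file) of the cit_iv placement
 * arithmetic at the end of crypto_init_cipher_ops(): the IV lives in the same
 * allocation as the transform context, directly after the context rounded up
 * to the algorithm's alignment.  All numbers below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t ctx = 0x1004;		/* pretend crypto_tfm_ctx() address */
	uintptr_t align = 16;		/* alignmask 15 -> 16-byte alignment */
	uintptr_t ctxsize = 200;	/* pretend cra_ctxsize */
	uintptr_t addr;

	addr = ALIGN(ctx, align);	/* 0x1010: context start, aligned */
	addr += ALIGN(ctxsize, align);	/* +208: room for the context itself */

	printf("cit_iv would live at %#lx\n", (unsigned long)addr);	/* 0x10e0 */
	return 0;
}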
void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
}