crypto/ahash.c

/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
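
/*
 * Illustrative sketch, not part of upstream ahash.c: the canonical way
 * an implementation consumes a request through the walk API above, as
 * in shash.c's shash_ahash_update(). Each iteration yields a mapped,
 * contiguous, alignment-trimmed chunk at walk.data; passing the chunk
 * handler's return value to crypto_hash_walk_done() both reports any
 * error and advances the walk. "example_process_block" is a
 * hypothetical per-chunk handler returning 0 or -errno.
 */
static int example_process_block(const u8 *data, unsigned int len);

static int example_walk_update(struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, nbytes))
		nbytes = example_process_block(walk.data, nbytes);

	return nbytes;
}
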
int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
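
/*
 * Illustrative sketch, not part of upstream ahash.c: a keyed tfm is
 * set up once before any requests are issued against it. The key may
 * live in any buffer; crypto_ahash_setkey() bounces unaligned keys
 * through a temporary aligned copy above, so the caller does no
 * alignment work. "hmac(sha256)" availability is assumed.
 */
static struct crypto_ahash *example_alloc_keyed_tfm(const u8 *key,
						    unsigned int keylen)
{
	struct crypto_ahash *tfm;
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err) {
		crypto_free_ahash(tfm);
		return ERR_PTR(err);
	}

	return tfm;
}
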
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look like this:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;

	/* Restore the original crypto request. */
	req->result = priv->result;
	req->base.complete = priv->complete;
	req->base.data = priv->data;
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_op_unaligned_finish(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
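
/*
 * Illustrative sketch, not part of upstream ahash.c: the incremental
 * init/update/final flow through the entry points above. The request
 * is assumed to have been set up with ahash_request_set_crypt(); on
 * async implementations each call may also return -EINPROGRESS or
 * -EBUSY, in which case the caller must wait for the completion
 * callback instead of proceeding synchronously.
 */
static int example_incremental_hash(struct ahash_request *req)
{
	int err;

	err = crypto_ahash_init(req);		/* prepare internal state */
	if (err)
		return err;

	err = crypto_ahash_update(req);		/* hash req->src */
	if (err)
		return err;

	return crypto_ahash_final(req);		/* write digest to req->result */
}
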
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	ahash_def_finup_finish2(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	err = ahash_def_finup_finish1(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey)
		hash->setkey = alg->setkey;
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
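
/*
 * Illustrative sketch, not part of upstream ahash.c: a complete
 * one-shot digest over a scatterlist using the allocator above.
 * Synchronous completion is assumed for brevity; a real caller must
 * handle -EINPROGRESS/-EBUSY from crypto_ahash_digest() by waiting on
 * the completion callback passed to ahash_request_set_callback().
 * "sha256" availability is assumed.
 */
static int example_one_shot_digest(struct scatterlist *sg, unsigned int len,
				   u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_crypt(req, sg, out, len);

	err = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
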
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
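
/*
 * Illustrative sketch, not part of upstream ahash.c: the shape of a
 * driver-side ahash_alg registration. All "example_*" callbacks are
 * hypothetical prototypes; ahash_prepare_alg() above stamps in the
 * cra_type and CRYPTO_ALG_TYPE_AHASH flag, so the driver only fills in
 * its own fields.
 */
static int example_init(struct ahash_request *req);
static int example_update(struct ahash_request *req);
static int example_final(struct ahash_request *req);
static int example_digest(struct ahash_request *req);

static struct ahash_alg example_driver_alg = {
	.init	= example_init,
	.update	= example_update,
	.final	= example_final,
	.digest	= example_digest,
	.halg	= {
		.digestsize = 32,	/* e.g. SHA-256 */
		.base	= {
			.cra_name	 = "sha256",
			.cra_driver_name = "sha256-example",
			.cra_priority	 = 300,
			.cra_flags	 = CRYPTO_ALG_ASYNC,
			.cra_blocksize	 = 64,
			.cra_module	 = THIS_MODULE,
		},
	},
};

static int __init example_driver_init(void)
{
	return crypto_register_ahash(&example_driver_alg);
}

static void __exit example_driver_exit(void)
{
	crypto_unregister_ahash(&example_driver_alg);
}
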
int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");