crypto/digest.c
/*
 * Cryptographic API.
 *
 * Digest operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/scatterwalk.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>

#include "internal.h"
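
/*
 * init(): begin a digest computation by invoking the algorithm's
 * dia_init() callback on the underlying tfm.
 */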
static int init(struct hash_desc *desc)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);

	tfm->__crt_alg->cra_digest.dia_init(tfm);
	return 0;
}
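
/*
 * update2(): walk the scatterlist one page at a time, kmap each page and
 * feed the data to the algorithm's dia_update() callback.  If the current
 * offset violates the algorithm's alignmask, the odd leading bytes are
 * passed in a separate dia_update() call so the remainder is aligned.
 */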
static int update2(struct hash_desc *desc,
		   struct scatterlist *sg, unsigned int nbytes)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);

	if (!nbytes)
		return 0;

	for (;;) {
		struct page *pg = sg_page(sg);
		unsigned int offset = sg->offset;
		unsigned int l = sg->length;

		if (unlikely(l > nbytes))
			l = nbytes;
		nbytes -= l;

		do {
			unsigned int bytes_from_page = min(l, ((unsigned int)
							   (PAGE_SIZE)) -
							   offset);
			char *src = crypto_kmap(pg, 0);
			char *p = src + offset;

			if (unlikely(offset & alignmask)) {
				unsigned int bytes =
					alignmask + 1 - (offset & alignmask);

				bytes = min(bytes, bytes_from_page);
				tfm->__crt_alg->cra_digest.dia_update(tfm, p,
								      bytes);
				p += bytes;
				bytes_from_page -= bytes;
				l -= bytes;
			}
			tfm->__crt_alg->cra_digest.dia_update(tfm, p,
							      bytes_from_page);
			crypto_kunmap(src, 0);
			crypto_yield(desc->flags);
			offset = 0;
			pg++;
			l -= bytes_from_page;
		} while (l > 0);

		if (!nbytes)
			break;
		sg = scatterwalk_sg_next(sg);
	}

	return 0;
}
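
/*
 * update(): wrapper around update2() that refuses to run in hard interrupt
 * context, where the page mapping and the possible rescheduling in
 * crypto_yield() would be unsafe.
 */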
static int update(struct hash_desc *desc,
		  struct scatterlist *sg, unsigned int nbytes)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;
	return update2(desc, sg, nbytes);
}
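
/*
 * final(): produce the digest.  If the caller's output buffer does not
 * satisfy the algorithm's alignmask, the digest is written to an aligned
 * scratch area placed just past the tfm context and then copied out.
 */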
static int final(struct hash_desc *desc, u8 *out)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	struct digest_alg *digest = &tfm->__crt_alg->cra_digest;

	if (unlikely((unsigned long)out & alignmask)) {
		unsigned long align = alignmask + 1;
		unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
		u8 *dst = (u8 *)ALIGN(addr, align) +
			  ALIGN(tfm->__crt_alg->cra_ctxsize, align);

		digest->dia_final(tfm, dst);
		memcpy(out, dst, digest->dia_digestsize);
	} else
		digest->dia_final(tfm, out);

	return 0;
}
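
/*
 * setkey handling: algorithms that provide dia_setkey() get setkey(),
 * everything else gets nosetkey(), which rejects any key with -ENOSYS.
 */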
static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen)
{
	crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
	return -ENOSYS;
}

static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(hash);

	crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK);
	return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen);
}
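
/*
 * digest(): one-shot convenience operation, equivalent to init() +
 * update() + final().  Like update(), it must not be called from hard
 * interrupt context.
 */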
static int digest(struct hash_desc *desc,
		  struct scatterlist *sg, unsigned int nbytes, u8 *out)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	init(desc);
	update2(desc, sg, nbytes);
	return final(desc, out);
}
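
/*
 * crypto_init_digest_ops(): install the hash operations for a legacy
 * digest algorithm into the tfm's crt_hash vtable.  Algorithms whose
 * digest size exceeds their block size are rejected.
 */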
int crypto_init_digest_ops(struct crypto_tfm *tfm)
{
	struct hash_tfm *ops = &tfm->crt_hash;
	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;

	if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
		return -EINVAL;

	ops->init       = init;
	ops->update     = update;
	ops->final      = final;
	ops->digest     = digest;
	ops->setkey     = dalg->dia_setkey ? setkey : nosetkey;
	ops->digestsize = dalg->dia_digestsize;

	return 0;
}
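
/*
 * crypto_exit_digest_ops(): counterpart of crypto_init_digest_ops().
 * There is no per-tfm state to release here.
 */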
void crypto_exit_digest_ops(struct crypto_tfm *tfm)
{
}
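
/*
 * Illustrative sketch (not part of the original file): how a caller would
 * reach the ops installed above through the old synchronous crypto_hash
 * API.  Assumes a software digest such as "sha1" is registered; error
 * handling is omitted.
 *
 *	struct hash_desc desc;
 *	struct scatterlist sg;
 *	u8 out[20];	(20 bytes: SHA-1 digest size)
 *
 *	desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
 *	desc.flags = 0;
 *	sg_init_one(&sg, data, len);
 *	crypto_hash_digest(&desc, &sg, len, out);	-> digest() above
 *	crypto_free_hash(desc.tfm);
 */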