drivers/crypto/padlock-sha.c

/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
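
/*
 * The PadLock hash engine is driven only from finup()/final(): update()
 * is delegated to a software fallback shash, and at finalization time
 * the intermediate digest is exported from the fallback and handed to
 * the XSHA1/XSHA256 instructions, which hash the remaining bytes and
 * apply the final padding.
 */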

struct padlock_sha_desc {
        struct shash_desc fallback;
};

struct padlock_sha_ctx {
        struct crypto_shash *fallback;
};

static int padlock_sha_init(struct shash_desc *desc)
{
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

        dctx->fallback.tfm = ctx->fallback;
        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
                              const u8 *data, unsigned int length)
{
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        return crypto_shash_update(&dctx->fallback, data, length);
}

static int padlock_sha_export(struct shash_desc *desc, void *out)
{
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

        return crypto_shash_export(&dctx->fallback, out);
}

static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

        dctx->fallback.tfm = ctx->fallback;
        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        return crypto_shash_import(&dctx->fallback, in);
}
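
/*
 * Copy the digest out of the hardware's result buffer, byte-swapping
 * each 32-bit word into the canonical big-endian digest layout.
 */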
static inline void padlock_output_block(uint32_t *src,
                                        uint32_t *dst, size_t count)
{
        while (count--)
                *dst++ = swab32(*src++);
}

static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
                              unsigned int count, u8 *out)
{
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         *     PadLock microcode needs it that big. */
        char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
                ((aligned(STACK_ALIGN)));
        char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct sha1_state state;
        unsigned int space;
        unsigned int leftover;
        int ts_state;
        int err;

        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_shash_export(&dctx->fallback, &state);
        if (err)
                goto out;
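
        /*
         * state.count is 64-bit, but the byte counts are handed to the
         * hardware as unsigned longs below; if the total would not fit,
         * let the software fallback finish the hash instead.
         */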
        if (state.count + count > ULONG_MAX)
                return crypto_shash_finup(&dctx->fallback, in, count, out);
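
        /*
         * Work out how many bytes of the last, incomplete block the
         * fallback has buffered (1..SHA1_BLOCK_SIZE) and how much room
         * is left in that block.  A tail that still fits is hashed
         * straight from state.buffer; otherwise the fallback is first
         * advanced to the next block boundary.
         */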
        leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
        space = SHA1_BLOCK_SIZE - leftover;
        if (space) {
                if (count > space) {
                        err = crypto_shash_update(&dctx->fallback, in, space) ?:
                              crypto_shash_export(&dctx->fallback, &state);
                        if (err)
                                goto out;
                        count -= space;
                        in += space;
                } else {
                        memcpy(state.buffer + leftover, in, count);
                        in = state.buffer;
                        count += leftover;
                        state.count &= ~(SHA1_BLOCK_SIZE - 1);
                }
        }
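
        /* Seed the aligned result buffer with the intermediate digest. */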
        memcpy(result, &state.state, SHA1_DIGEST_SIZE);
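
        /*
         * Operand bindings for XSHA1, per the asm constraints below:
         * ESI = input data, EDI = digest buffer, ECX = total message
         * length, EAX = number of bytes already hashed.
         */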

        /* prevent taking the spurious DNA fault with padlock. */
        ts_state = irq_ts_save();
        asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
                      :
                      : "c"((unsigned long)state.count + count),
                        "a"((unsigned long)state.count),
                        "S"(in), "D"(result));
        irq_ts_restore(ts_state);

        padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
        return err;
}
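
/*
 * final() is finup() with an empty tail; buf is a dummy that is never
 * read since count is 0.
 */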
static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
        u8 buf[4];

        return padlock_sha1_finup(desc, buf, 0, out);
}
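
/*
 * The SHA-256 path below mirrors padlock_sha1_finup(), using the
 * XSHA256 opcode and an eight-word digest.
 */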
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
                                unsigned int count, u8 *out)
{
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         *     PadLock microcode needs it that big. */
        char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
                ((aligned(STACK_ALIGN)));
        char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct sha256_state state;
        unsigned int space;
        unsigned int leftover;
        int ts_state;
        int err;

        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_shash_export(&dctx->fallback, &state);
        if (err)
                goto out;

        if (state.count + count > ULONG_MAX)
                return crypto_shash_finup(&dctx->fallback, in, count, out);

        leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
        space = SHA256_BLOCK_SIZE - leftover;
        if (space) {
                if (count > space) {
                        err = crypto_shash_update(&dctx->fallback, in, space) ?:
                              crypto_shash_export(&dctx->fallback, &state);
                        if (err)
                                goto out;
                        count -= space;
                        in += space;
                } else {
                        memcpy(state.buf + leftover, in, count);
                        in = state.buf;
                        count += leftover;
                        state.count &= ~(SHA256_BLOCK_SIZE - 1);
                }
        }

        memcpy(result, &state.state, SHA256_DIGEST_SIZE);

        /* prevent taking the spurious DNA fault with padlock. */
        ts_state = irq_ts_save();
        asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
                      :
                      : "c"((unsigned long)state.count + count),
                        "a"((unsigned long)state.count),
                        "S"(in), "D"(result));
        irq_ts_restore(ts_state);

        padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
        return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
        u8 buf[4];

        return padlock_sha256_finup(desc, buf, 0, out);
}

static int padlock_cra_init(struct crypto_tfm *tfm)
{
        struct crypto_shash *hash = __crypto_shash_cast(tfm);
        const char *fallback_driver_name = tfm->__crt_alg->cra_name;
        struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *fallback_tfm;
        int err = -ENOMEM;

        /* Allocate a fallback and abort if it failed. */
        fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
                                          CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fallback_tfm)) {
                printk(KERN_WARNING PFX
                       "Fallback driver '%s' could not be loaded!\n",
                       fallback_driver_name);
                err = PTR_ERR(fallback_tfm);
                goto out;
        }
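
        /*
         * Our descriptor embeds the fallback's descriptor, so grow
         * descsize to make room for the fallback's per-request state.
         */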
        ctx->fallback = fallback_tfm;
        hash->descsize += crypto_shash_descsize(fallback_tfm);
        return 0;

out:
        return err;
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
        struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
        .digestsize     = SHA1_DIGEST_SIZE,
        .init           = padlock_sha_init,
        .update         = padlock_sha_update,
        .finup          = padlock_sha1_finup,
        .final          = padlock_sha1_final,
        .export         = padlock_sha_export,
        .import         = padlock_sha_import,
        .descsize       = sizeof(struct padlock_sha_desc),
        .statesize      = sizeof(struct sha1_state),
        .base           = {
                .cra_name               = "sha1",
                .cra_driver_name        = "sha1-padlock",
                .cra_priority           = PADLOCK_CRA_PRIORITY,
                .cra_flags              = CRYPTO_ALG_TYPE_SHASH |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct padlock_sha_ctx),
                .cra_module             = THIS_MODULE,
                .cra_init               = padlock_cra_init,
                .cra_exit               = padlock_cra_exit,
        }
};

static struct shash_alg sha256_alg = {
        .digestsize     = SHA256_DIGEST_SIZE,
        .init           = padlock_sha_init,
        .update         = padlock_sha_update,
        .finup          = padlock_sha256_finup,
        .final          = padlock_sha256_final,
        .export         = padlock_sha_export,
        .import         = padlock_sha_import,
        .descsize       = sizeof(struct padlock_sha_desc),
        .statesize      = sizeof(struct sha256_state),
        .base           = {
                .cra_name               = "sha256",
                .cra_driver_name        = "sha256-padlock",
                .cra_priority           = PADLOCK_CRA_PRIORITY,
                .cra_flags              = CRYPTO_ALG_TYPE_SHASH |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA256_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct padlock_sha_ctx),
                .cra_module             = THIS_MODULE,
                .cra_init               = padlock_cra_init,
                .cra_exit               = padlock_cra_exit,
        }
};

static int __init padlock_init(void)
{
        int rc = -ENODEV;

        if (!cpu_has_phe) {
                printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
                return -ENODEV;
        }

        if (!cpu_has_phe_enabled) {
                printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                return -ENODEV;
        }

        rc = crypto_register_shash(&sha1_alg);
        if (rc)
                goto out;

        rc = crypto_register_shash(&sha256_alg);
        if (rc)
                goto out_unreg1;

        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

        return 0;

out_unreg1:
        crypto_unregister_shash(&sha1_alg);
out:
        printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
        return rc;
}

static void __exit padlock_fini(void)
{
        crypto_unregister_shash(&sha1_alg);
        crypto_unregister_shash(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");