drivers/crypto/padlock-aes.c
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
 */
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include "padlock.h"
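
/* Note (added): asm/i387.h supplies the irq_ts_save()/irq_ts_restore()
   helpers used around every xcrypt call below. */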

/* Control word. */
struct cword {
        unsigned int __attribute__ ((__packed__))
                rounds:4,
                algo:3,
                keygen:1,
                interm:1,
                encdec:1,
                ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
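
/* Note (added): this bitfield mirrors the control word the xcrypt
   instructions consume via %edx; the PADLOCK_ALIGNMENT attribute keeps
   it on the 16-byte boundary the engine expects. */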

/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, and that
 * the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads
 * more).
 */
struct aes_ctx {
        u32 E[AES_MAX_KEYLENGTH_U32]
                __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
        u32 d_data[AES_MAX_KEYLENGTH_U32]
                __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
        struct {
                struct cword encrypt;
                struct cword decrypt;
        } cword;
        u32 *D;
};

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
        /* TODO: We should check the actual CPU model/stepping
           as it's possible that the capability will be
           added in the next CPU revisions. */
        if (key_len == 16)
                return 1;
        return 0;
}

static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
        unsigned long addr = (unsigned long)ctx;
        unsigned long align = PADLOCK_ALIGNMENT;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
        return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
        return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}
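
/* Note (added): cra_alignmask already asks the crypto layer to align the
   context, so aes_ctx_common() only re-aligns by hand when the layer's
   guarantee (crypto_tfm_ctx_alignment()) is weaker than PADLOCK_ALIGNMENT. */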

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        struct aes_ctx *ctx = aes_ctx(tfm);
        const __le32 *key = (const __le32 *)in_key;
        u32 *flags = &tfm->crt_flags;
        struct crypto_aes_ctx gen_aes;

        if (key_len % 8) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /*
         * If the hardware is capable of generating the extended key
         * itself we must supply the plain key for both encryption
         * and decryption.
         */
        ctx->D = ctx->E;

        ctx->E[0] = le32_to_cpu(key[0]);
        ctx->E[1] = le32_to_cpu(key[1]);
        ctx->E[2] = le32_to_cpu(key[2]);
        ctx->E[3] = le32_to_cpu(key[3]);

        /* Prepare control words. */
        memset(&ctx->cword, 0, sizeof(ctx->cword));

        ctx->cword.decrypt.encdec = 1;
        ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
        ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
        ctx->cword.encrypt.ksize = (key_len - 16) / 8;
        ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;
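
        /* Note (added): these encodings follow from the key length --
           rounds is 10/12/14 and ksize is 0/1/2 for 128/192/256-bit keys
           respectively; encdec set selects decryption. */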

        /* Don't generate extended keys if the hardware can do it. */
        if (aes_hw_extkey_available(key_len))
                return 0;

        ctx->D = ctx->d_data;
        ctx->cword.encrypt.keygen = 1;
        ctx->cword.decrypt.keygen = 1;

        if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
        memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
        return 0;
}
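
/* Note (added): for a 128-bit key the engine expands the schedule itself
   from the plain key (keygen stays 0); for 192/256-bit keys the schedule
   is expanded in software and keygen=1 tells the engine to use it as-is. */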

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(void)
{
        asm volatile ("pushfl; popfl");
}
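
/* Note (added): reloading EFLAGS via pushfl/popfl makes the PadLock unit
   reload the key material on the next xcrypt instead of reusing its
   internally cached round keys. */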

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'. These instructions
 * should be used only inside the irq_ts_save/restore() context.
 */

static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
                                  void *control_word)
{
        asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                      : "+S"(input), "+D"(output)
                      : "d"(control_word), "b"(key), "c"(1));
}

static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
{
        u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
        u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

        memcpy(tmp, in, AES_BLOCK_SIZE);
        padlock_xcrypt(tmp, out, key, cword);
}

static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
                             struct cword *cword)
{
        /* padlock_xcrypt requires at least two blocks of data; the check
           below fires when the single input block is the last block of a
           page, i.e. when the engine's read-ahead could fault. */
        if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
                       (PAGE_SIZE - 1)))) {
                aes_crypt_copy(in, out, key, cword);
                return;
        }

        padlock_xcrypt(in, out, key, cword);
}

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                                      void *control_word, u32 count)
{
        if (count == 1) {
                aes_crypt(input, output, key, control_word);
                return;
        }

        asm volatile ("test $1, %%cl;"
                      "je 1f;"
                      "lea -1(%%ecx), %%eax;"
                      "mov $1, %%ecx;"
                      ".byte 0xf3,0x0f,0xa7,0xc8;"      /* rep xcryptecb */
                      "mov %%eax, %%ecx;"
                      "1:"
                      ".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                      : "+S"(input), "+D"(output)
                      : "d"(control_word), "b"(key), "c"(count)
                      : "ax");
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
                                     u8 *iv, void *control_word, u32 count)
{
        /* rep xcryptcbc */
        asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
                      : "+S" (input), "+D" (output), "+a" (iv)
                      : "d" (control_word), "b" (key), "c" (count));
        return iv;
}
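
/* Note (added): xcryptcbc takes the IV pointer in %eax and advances it as
   it chains, so the returned pointer addresses the chaining value for the
   next segment (used by cbc_aes_encrypt() below). */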

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct aes_ctx *ctx = aes_ctx(tfm);
        int ts_state;
        padlock_reset_key();

        ts_state = irq_ts_save();
        aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
        irq_ts_restore(ts_state);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct aes_ctx *ctx = aes_ctx(tfm);
        int ts_state;
        padlock_reset_key();

        ts_state = irq_ts_save();
        aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
        irq_ts_restore(ts_state);
}

static struct crypto_alg aes_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-padlock",
        .cra_priority           = PADLOCK_CRA_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aes_ctx),
        .cra_alignmask          = PADLOCK_ALIGNMENT - 1,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(aes_alg.cra_list),
        .cra_u                  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt,
                }
        }
};
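
/* Note (added): aes_alg exposes the bare single-block cipher; the
   blkcipher templates below let the crypto API run ECB/CBC directly on
   the hardware instead of composing them from single-block calls. */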

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;
        int ts_state;

        padlock_reset_key();

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        ts_state = irq_ts_save();
        while ((nbytes = walk.nbytes)) {
                padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
                                   ctx->E, &ctx->cword.encrypt,
                                   nbytes / AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        irq_ts_restore(ts_state);

        return err;
}
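
/* Note (added): blkcipher_walk_virt() maps one scatterlist segment per
   iteration; each pass processes all whole blocks in the segment and
   hands the sub-block remainder back to blkcipher_walk_done(). */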

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;
        int ts_state;

        padlock_reset_key();

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        ts_state = irq_ts_save();
        while ((nbytes = walk.nbytes)) {
                padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
                                   ctx->D, &ctx->cword.decrypt,
                                   nbytes / AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        irq_ts_restore(ts_state);
        return err;
}

static struct crypto_alg ecb_aes_alg = {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-padlock",
        .cra_priority           = PADLOCK_COMPOSITE_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aes_ctx),
        .cra_alignmask          = PADLOCK_ALIGNMENT - 1,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
        .cra_u                  = {
                .blkcipher = {
                        .min_keysize            = AES_MIN_KEY_SIZE,
                        .max_keysize            = AES_MAX_KEY_SIZE,
                        .setkey                 = aes_set_key,
                        .encrypt                = ecb_aes_encrypt,
                        .decrypt                = ecb_aes_decrypt,
                }
        }
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;
        int ts_state;

        padlock_reset_key();

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        ts_state = irq_ts_save();
        while ((nbytes = walk.nbytes)) {
                u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
                                            walk.dst.virt.addr, ctx->E,
                                            walk.iv, &ctx->cword.encrypt,
                                            nbytes / AES_BLOCK_SIZE);
                memcpy(walk.iv, iv, AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        irq_ts_restore(ts_state);

        return err;
}
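
/* Note (added): on encryption the chaining value for the next segment is
   the last ciphertext block; padlock_xcrypt_cbc() returns a pointer to it
   and the memcpy() above carries it over in walk.iv. */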

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;
        int ts_state;

        padlock_reset_key();

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        ts_state = irq_ts_save();
        while ((nbytes = walk.nbytes)) {
                padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
                                   ctx->D, walk.iv, &ctx->cword.decrypt,
                                   nbytes / AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        irq_ts_restore(ts_state);
        return err;
}

static struct crypto_alg cbc_aes_alg = {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-padlock",
        .cra_priority           = PADLOCK_COMPOSITE_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aes_ctx),
        .cra_alignmask          = PADLOCK_ALIGNMENT - 1,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
        .cra_u                  = {
                .blkcipher = {
                        .min_keysize            = AES_MIN_KEY_SIZE,
                        .max_keysize            = AES_MAX_KEY_SIZE,
                        .ivsize                 = AES_BLOCK_SIZE,
                        .setkey                 = aes_set_key,
                        .encrypt                = cbc_aes_encrypt,
                        .decrypt                = cbc_aes_decrypt,
                }
        }
};

static int __init padlock_init(void)
{
        int ret;

        if (!cpu_has_xcrypt) {
                printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
                return -ENODEV;
        }

        if (!cpu_has_xcrypt_enabled) {
                printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                return -ENODEV;
        }

        if ((ret = crypto_register_alg(&aes_alg)))
                goto aes_err;

        if ((ret = crypto_register_alg(&ecb_aes_alg)))
                goto ecb_aes_err;

        if ((ret = crypto_register_alg(&cbc_aes_alg)))
                goto cbc_aes_err;

        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

out:
        return ret;

cbc_aes_err:
        crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
        crypto_unregister_alg(&aes_alg);
aes_err:
        printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
        goto out;
}
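
/* Note (added): the error labels unwind in reverse registration order, so
   a failure at any step unregisters exactly the algorithms registered
   before it, then falls through to the common return. */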

static void __exit padlock_fini(void)
{
        crypto_unregister_alg(&cbc_aes_alg);
        crypto_unregister_alg(&ecb_aes_alg);
        crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");