/*
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include "padlock.h"

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

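/*
 * Note: padlock_init() raises ecb_fetch_blocks/cbc_fetch_blocks to the
 * MAX_*_FETCH_BLOCKS values when the affected CPU (VIA Nano stepping 2)
 * is detected, so all block-count math below goes through these
 * variables rather than the constants.
 */
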
/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16 byte boundaries and
 * that the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

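/*
 * Control word last loaded into the PadLock unit on each CPU.  Used by
 * padlock_reset_key()/padlock_store_cword() below to skip the key reload
 * when the same tfm runs back-to-back on one CPU.
 */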
static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in the next CPU revisions. */
	if (key_len == 16)
		return 1;

	return 0;
}

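/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for a tfm
 * context, which may be less than the 16-byte PADLOCK_ALIGNMENT the
 * hardware needs for E, d_data and the control words; the helpers below
 * round the context pointer up accordingly.
 */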
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

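	/*
	 * The key behind any control word of this tfm that is still cached
	 * per-CPU is now stale, so drop those references and force the next
	 * xcrypt on each CPU to reload the new key (see padlock_reset_key()).
	 */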
ok:
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
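/*
 * The PadLock unit caches the key/control word of the last xcrypt it
 * executed; writing EFLAGS (the pushf/popf below) forces it to reload
 * them from memory.  We only pay that cost when the last operation on
 * this CPU used a different control word.
 */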
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'. These instructions
 * should be used only inside the irq_ts_save/restore() context.
 */
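/*
 * The xcryptecb/xcryptcbc opcodes are emitted as raw .byte sequences so
 * that no PadLock-aware assembler is required.  The "+S"/"+D" (and "+a"
 * for CBC) constraints reflect that the instruction itself advances the
 * source, destination and IV pointers as it processes blocks.
 */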
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

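/*
 * padlock_xcrypt_ecb()/padlock_xcrypt_cbc() below split a request into a
 * short "initial" run (count modulo the fetch size) followed by the bulk
 * that is a multiple of the fetch size.  Requests smaller than one fetch
 * unit go through ecb_crypt()/cbc_crypt(), which bounce the data through
 * a stack buffer when the hardware over-fetch could run past the end of
 * the mapped page.
 */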
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count - initial));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count - initial));

	return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

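/*
 * Note that both single-block paths above use &ctx->cword.encrypt as the
 * per-CPU cache token even for decryption; aes_set_key() invalidates both
 * control word pointers, so either one identifies "this tfm ran last".
 */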
static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};

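/*
 * The blkcipher entry points below walk the scatterlists one mapped
 * segment at a time (blkcipher_walk_virt()), hand each whole-block run
 * to the xcrypt helpers, and return any sub-block tail to
 * blkcipher_walk_done().
 */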
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.decrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-padlock",
	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_aes_encrypt,
			.decrypt	= ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-padlock",
	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_aes_encrypt,
			.decrypt	= cbc_aes_decrypt,
		}
	}
};

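/*
 * PADLOCK_CRA_PRIORITY and PADLOCK_COMPOSITE_PRIORITY (from padlock.h) are
 * higher than those of the generic software implementations, so once
 * registration succeeds the crypto API prefers these drivers for "aes",
 * "ecb(aes)" and "cbc(aes)".
 */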
static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!cpu_has_xcrypt) {
		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");