/*
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 */
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/fpu/api.h>
/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
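/*
 * The defaults (2 blocks for ECB, 1 for CBC) match what the engine
 * normally fetches per operation; padlock_init() raises them to the
 * MAX_*_FETCH_BLOCKS values on VIA Nano stepping 2 parts, whose
 * prefetch errata make the engine read ahead of the blocks it
 * actually processes.
 */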
/* Control word. */
struct cword {
        unsigned int __attribute__ ((__packed__))
                rounds:4,
                algo:3,
                keygen:1,
                interm:1,
                encdec:1,
                ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, and that
 * the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads
 * the full 16 * 16 bytes). */
struct aes_ctx {
        u32 E[AES_MAX_KEYLENGTH_U32]
                __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
        u32 d_data[AES_MAX_KEYLENGTH_U32]
                __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
        struct {
                struct cword encrypt;
                struct cword decrypt;
        } cword;
        u32 *D;
};
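/*
 * Per-CPU cache of the control word most recently used by the xcrypt
 * engine on each CPU.  padlock_reset_key() consults it to skip the
 * (relatively costly) EFLAGS reload when the same context runs
 * back-to-back on one CPU; aes_set_key() invalidates stale entries.
 */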
static DEFINE_PER_CPU(struct cword *, paes_last_cword);
/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
        /* TODO: We should check the actual CPU model/stepping
           as it's possible that the capability will be
           added in the next CPU revisions. */
        if (key_len == 16)
                return 1;
        return 0;
}
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
        unsigned long addr = (unsigned long)ctx;
        unsigned long align = PADLOCK_ALIGNMENT;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct aes_ctx *)ALIGN(addr, align);
}
static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
        return aes_ctx_common(crypto_tfm_ctx(tfm));
}
static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
        return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}
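/*
 * The context returned by crypto_tfm_ctx() is only guaranteed to be
 * aligned to crypto_tfm_ctx_alignment(), which may be less than the
 * 16 bytes the engine requires, so the helpers above round the
 * pointer up by hand.  The cra_alignmask of PADLOCK_ALIGNMENT - 1 in
 * the algorithm definitions below has the crypto core reserve enough
 * slack in the context allocation for this rounding.
 */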
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        struct aes_ctx *ctx = aes_ctx(tfm);
        const __le32 *key = (const __le32 *)in_key;
        u32 *flags = &tfm->crt_flags;
        struct crypto_aes_ctx gen_aes;
        int cpu;

        if (key_len % 8) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /*
         * If the hardware is capable of generating the extended key
         * itself we must supply the plain key for both encryption
         * and decryption.
         */
        ctx->D = ctx->E;

        ctx->E[0] = le32_to_cpu(key[0]);
        ctx->E[1] = le32_to_cpu(key[1]);
        ctx->E[2] = le32_to_cpu(key[2]);
        ctx->E[3] = le32_to_cpu(key[3]);

        /* Prepare control words. */
        memset(&ctx->cword, 0, sizeof(ctx->cword));

        ctx->cword.decrypt.encdec = 1;
        ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
        ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
        ctx->cword.encrypt.ksize = (key_len - 16) / 8;
        ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

        /* Don't generate extended keys if the hardware can do it. */
        if (aes_hw_extkey_available(key_len))
                goto ok;

        ctx->D = ctx->d_data;
        ctx->cword.encrypt.keygen = 1;
        ctx->cword.decrypt.keygen = 1;

        if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
        memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
        /* Invalidate any cached control words pointing into this ctx. */
        for_each_online_cpu(cpu)
                if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
                    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
                        per_cpu(paes_last_cword, cpu) = NULL;

        return 0;
}
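/*
 * Worked example of the control words set up above: for a 256-bit key
 * (key_len = 32), rounds = 10 + (32 - 16) / 4 = 14 and
 * ksize = (32 - 16) / 8 = 2.  Since the ACE expands only 128-bit keys
 * itself, keygen is set and the round keys come from
 * crypto_aes_expand_key() instead.
 */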
/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
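/*
 * The engine caches the current control word and key; reloading
 * EFLAGS with pushf/popf forces it to re-read them on the next
 * xcrypt.  padlock_reset_key() does this only when the control word
 * about to be used differs from the last one used on this CPU, and
 * padlock_store_cword() records what ended up loaded.
 */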
static inline void padlock_reset_key(struct cword *cword)
{
        int cpu = raw_smp_processor_id();

        if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
                asm volatile ("pushfl; popfl");
#else
                asm volatile ("pushfq; popfq");
#endif
}
static inline void padlock_store_cword(struct cword *cword)
{
        per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}
/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when CR0.TS is '1'.  Fortunately,
 * the kernel doesn't use CR0.TS.
 */
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                                  struct cword *control_word, int count)
{
        asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                      : "+S"(input), "+D"(output)
                      : "d"(control_word), "b"(key), "c"(count));
}
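/*
 * Register contract of the xcrypt instructions, as encoded by the asm
 * constraints above and below: ESI = source, EDI = destination,
 * EDX = control word, EBX = key schedule, ECX = block count.  The CBC
 * form additionally carries the IV pointer in EAX and leaves it
 * pointing at the last ciphertext block, which rep_xcrypt_cbc()
 * returns to its caller.
 */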
static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
                                 u8 *iv, struct cword *control_word, int count)
{
        asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                      : "+S" (input), "+D" (output), "+a" (iv)
                      : "d" (control_word), "b" (key), "c" (count));
        return iv;
}
static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
                           struct cword *cword, int count)
{
        /*
         * Padlock prefetches extra data so we must provide mapped input buffers.
         * Assume there are at least 16 bytes of stack already in use.
         */
        u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
        u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

        memcpy(tmp, in, count * AES_BLOCK_SIZE);
        rep_xcrypt_ecb(tmp, out, key, cword, count);
}
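/*
 * Sizing note: the copy path only ever sees count < ecb_fetch_blocks
 * (see ecb_crypt() and padlock_xcrypt_ecb()), so at most
 * MAX_ECB_FETCH_BLOCKS - 1 blocks need to fit in buf; the extra
 * PADLOCK_ALIGNMENT - 1 bytes leave room for PTR_ALIGN.  The engine's
 * over-fetch past those blocks then lands on the stack, which is
 * always mapped.  cbc_crypt_copy() below is sized the same way.
 */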
static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
                          u8 *iv, struct cword *cword, int count)
{
        /*
         * Padlock prefetches extra data so we must provide mapped input buffers.
         * Assume there are at least 16 bytes of stack already in use.
         */
        u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
        u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

        memcpy(tmp, in, count * AES_BLOCK_SIZE);
        return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}
static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
                             struct cword *cword, int count)
{
        /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
         * We could avoid some copying here but it's probably not worth it.
         */
        if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
                ecb_crypt_copy(in, out, key, cword, count);
                return;
        }

        rep_xcrypt_ecb(in, out, key, cword, count);
}
static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
                            u8 *iv, struct cword *cword, int count)
{
        /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
        if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
                return cbc_crypt_copy(in, out, key, iv, cword, count);

        return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                                      void *control_word, u32 count)
{
        u32 initial = count & (ecb_fetch_blocks - 1);

        if (count < ecb_fetch_blocks) {
                ecb_crypt(input, output, key, control_word, count);
                return;
        }

        if (initial)
                asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                              : "+S"(input), "+D"(output)
                              : "d"(control_word), "b"(key), "c"(initial));

        asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                      : "+S"(input), "+D"(output)
                      : "d"(control_word), "b"(key), "c"(count - initial));
}
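/*
 * The split above runs the count % ecb_fetch_blocks leftover blocks
 * first, while at least ecb_fetch_blocks of input still remain, and
 * then the rest as an exact multiple of the fetch size, so the
 * engine's read-ahead never extends past the end of the source
 * buffer.  Counts smaller than one fetch unit take the ecb_crypt()
 * path, which falls back to a bounce buffer near a page boundary.
 * padlock_xcrypt_cbc() below uses the same scheme.
 */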
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
                                     u8 *iv, void *control_word, u32 count)
{
        u32 initial = count & (cbc_fetch_blocks - 1);

        if (count < cbc_fetch_blocks)
                return cbc_crypt(input, output, key, iv, control_word, count);

        if (initial)
                asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                              : "+S" (input), "+D" (output), "+a" (iv)
                              : "d" (control_word), "b" (key), "c" (initial));

        asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                      : "+S" (input), "+D" (output), "+a" (iv)
                      : "d" (control_word), "b" (key), "c" (count - initial));

        return iv;
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct aes_ctx *ctx = aes_ctx(tfm);

        padlock_reset_key(&ctx->cword.encrypt);
        ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
        padlock_store_cword(&ctx->cword.encrypt);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct aes_ctx *ctx = aes_ctx(tfm);

        padlock_reset_key(&ctx->cword.encrypt);
        ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
        padlock_store_cword(&ctx->cword.encrypt);
}
static struct crypto_alg aes_alg = {
        .cra_name               =       "aes",
        .cra_driver_name        =       "aes-padlock",
        .cra_priority           =       PADLOCK_CRA_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct aes_ctx),
        .cra_alignmask          =       PADLOCK_ALIGNMENT - 1,
        .cra_module             =       THIS_MODULE,
        .cra_u                  =       {
                .cipher = {
                        .cia_min_keysize        =       AES_MIN_KEY_SIZE,
                        .cia_max_keysize        =       AES_MAX_KEY_SIZE,
                        .cia_setkey             =       aes_set_key,
                        .cia_encrypt            =       aes_encrypt,
                        .cia_decrypt            =       aes_decrypt,
                }
        }
};
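/*
 * Usage sketch (caller-side, not part of this driver; assumes the
 * single-block cipher API): with this module loaded, a plain "aes"
 * allocation resolves to "aes-padlock", since PADLOCK_CRA_PRIORITY
 * outranks the generic C implementation:
 *
 *      struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
 *
 *      if (!IS_ERR(tfm)) {
 *              crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
 *              crypto_cipher_encrypt_one(tfm, dst, src);
 *              crypto_free_cipher(tfm);
 *      }
 */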
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;

        padlock_reset_key(&ctx->cword.encrypt);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
                                   ctx->E, &ctx->cword.encrypt,
                                   nbytes / AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        padlock_store_cword(&ctx->cword.encrypt);

        return err;
}
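/*
 * Walk-loop pattern used by all four blkcipher handlers: the walk
 * hands back one mapped, contiguous chunk at a time, the whole blocks
 * in it are pushed through the engine, and the sub-block remainder
 * (nbytes & (AES_BLOCK_SIZE - 1)) is handed to blkcipher_walk_done(),
 * which carries it over into the next iteration.
 */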
static int ecb_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;

        padlock_reset_key(&ctx->cword.decrypt);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
                                   ctx->D, &ctx->cword.decrypt,
                                   nbytes / AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        padlock_store_cword(&ctx->cword.encrypt);

        return err;
}
static struct crypto_alg ecb_aes_alg = {
        .cra_name               =       "ecb(aes)",
        .cra_driver_name        =       "ecb-aes-padlock",
        .cra_priority           =       PADLOCK_COMPOSITE_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct aes_ctx),
        .cra_alignmask          =       PADLOCK_ALIGNMENT - 1,
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .setkey                 =       aes_set_key,
                        .encrypt                =       ecb_aes_encrypt,
                        .decrypt                =       ecb_aes_decrypt,
                }
        }
};
static int cbc_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;

        padlock_reset_key(&ctx->cword.encrypt);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
                                            walk.dst.virt.addr, ctx->E,
                                            walk.iv, &ctx->cword.encrypt,
                                            nbytes / AES_BLOCK_SIZE);
                /* The engine leaves iv pointing at the last ciphertext
                 * block; propagate it as the IV for the next chunk. */
                memcpy(walk.iv, iv, AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        padlock_store_cword(&ctx->cword.decrypt);

        return err;
}
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;

        padlock_reset_key(&ctx->cword.encrypt);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
                                   ctx->D, walk.iv, &ctx->cword.decrypt,
                                   nbytes / AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        padlock_store_cword(&ctx->cword.encrypt);

        return err;
}
static struct crypto_alg cbc_aes_alg = {
        .cra_name               =       "cbc(aes)",
        .cra_driver_name        =       "cbc-aes-padlock",
        .cra_priority           =       PADLOCK_COMPOSITE_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct aes_ctx),
        .cra_alignmask          =       PADLOCK_ALIGNMENT - 1,
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       aes_set_key,
                        .encrypt                =       cbc_aes_encrypt,
                        .decrypt                =       cbc_aes_decrypt,
                }
        }
};
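/*
 * The two composite modes register at PADLOCK_COMPOSITE_PRIORITY so
 * they are preferred over "ecb(aes)" / "cbc(aes)" instances the
 * crypto core would otherwise build from a software mode template
 * wrapped around a single-block cipher.
 */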
static const struct x86_cpu_id padlock_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);
static int __init padlock_init(void)
{
        int ret;
        struct cpuinfo_x86 *c = &cpu_data(0);

        if (!x86_match_cpu(padlock_cpu_id))
                return -ENODEV;

        if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
                printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                return -ENODEV;
        }

        if ((ret = crypto_register_alg(&aes_alg)))
                goto aes_err;

        if ((ret = crypto_register_alg(&ecb_aes_alg)))
                goto ecb_aes_err;

        if ((ret = crypto_register_alg(&cbc_aes_alg)))
                goto cbc_aes_err;

        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

        /* Nano stepping 2 prefetches beyond the blocks it processes;
         * widen the fetch sizes so the boundary checks account for it. */
        if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
                ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
                cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
                printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
        }

out:
        return ret;

cbc_aes_err:
        crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
        crypto_unregister_alg(&aes_alg);
aes_err:
        printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
        goto out;
}
static void __exit padlock_fini(void)
{
        crypto_unregister_alg(&cbc_aes_alg);
        crypto_unregister_alg(&ecb_aes_alg);
        crypto_unregister_alg(&aes_alg);
}
module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS_CRYPTO("aes");