/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 * Key expansion routine taken from crypto/aes_generic.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * ---------------------------------------------------------------------------
 * Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
 * All rights reserved.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 *   1. distributions of this source code include the above copyright
 *      notice, this list of conditions and the following disclaimer;
 *
 *   2. distributions in binary form include the above copyright
 *      notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other associated materials;
 *
 *   3. the copyright holder's name is not used to endorse products
 *      built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 */
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include "padlock.h"

#define AES_EXTENDED_KEY_SIZE	64	/* in uint32_t units */
#define AES_EXTENDED_KEY_SIZE_B	(AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
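/* The control word is what the xcrypt instructions read through EDX
 * (see padlock_xcrypt() below): rounds/ksize describe the AES variant,
 * encdec selects decryption, and keygen is set when a pre-expanded key
 * schedule is supplied instead of the plain key (see aes_set_key()). */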
/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16 Bytes boundaries!!! */
struct aes_ctx {
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
	int key_length;
	u32 E[AES_EXTENDED_KEY_SIZE]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_EXTENDED_KEY_SIZE]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
};

/* ====== Key management routines ====== */
static inline uint32_t
generic_rotr32 (const uint32_t x, const unsigned bits)
{
	const unsigned n = bits % 32;
	return (x >> n) | (x << (32 - n));
}
static inline uint32_t
generic_rotl32 (const uint32_t x, const unsigned bits)
{
	const unsigned n = bits % 32;
	return (x << n) | (x >> (32 - n));
}

#define rotl generic_rotl32
#define rotr generic_rotr32
/*
 * #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
 */
static inline uint8_t
byte(const uint32_t x, const unsigned n)
{
	return x >> (n << 3);
}
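/* byte() extracts byte n of x: n << 3 is n * 8, matching the macro form
 * quoted in the comment above. */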
#define E_KEY ctx->E
#define D_KEY ctx->D

static uint8_t pow_tab[256];
static uint8_t log_tab[256];
static uint8_t sbx_tab[256];
static uint8_t isb_tab[256];
static uint32_t rco_tab[10];
static uint32_t ft_tab[4][256];
static uint32_t it_tab[4][256];

static uint32_t fl_tab[4][256];
static uint32_t il_tab[4][256];
static inline uint8_t
f_mult (uint8_t a, uint8_t b)
{
	uint8_t aa = log_tab[a], cc = aa + log_tab[b];

	return pow_tab[cc + (cc < aa ? 1 : 0)];
}

#define ff_mult(a,b)	(a && b ? f_mult(a, b) : 0)
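/* f_mult() multiplies in GF(2^8) by adding discrete logarithms; the
 * "+1" corrects for the reduction being mod 255 rather than mod 256 when
 * the byte-sized sum wraps.  ff_mult() guards the zero operands, for
 * which no logarithm exists. */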
#define f_rn(bo, bi, n, k) \
    bo[n] = ft_tab[0][byte(bi[n],0)] ^ \
            ft_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
            ft_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
            ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)

#define i_rn(bo, bi, n, k) \
    bo[n] = it_tab[0][byte(bi[n],0)] ^ \
            it_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
            it_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
            it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
#define ls_box(x)				\
    ( fl_tab[0][byte(x, 0)] ^			\
      fl_tab[1][byte(x, 1)] ^			\
      fl_tab[2][byte(x, 2)] ^			\
      fl_tab[3][byte(x, 3)] )
#define f_rl(bo, bi, n, k) \
    bo[n] = fl_tab[0][byte(bi[n],0)] ^ \
            fl_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
            fl_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
            fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)

#define i_rl(bo, bi, n, k) \
    bo[n] = il_tab[0][byte(bi[n],0)] ^ \
            il_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
            il_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
            il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
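/* f_rn/i_rn compute one forward/inverse AES round per output word from
 * the combined S-box + (Inv)MixColumns tables ft_tab/it_tab; f_rl/i_rl
 * are the final rounds, which use the plain S-box tables fl_tab/il_tab. */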
static void
gen_tabs (void)
{
	uint32_t i, t;
	uint8_t p, q;

	/* log and power tables for GF(2**8) finite field with
	   0x011b as modular polynomial - the simplest primitive
	   root is 0x03, used here to generate the tables */

	for (i = 0, p = 1; i < 256; ++i) {
		pow_tab[i] = (uint8_t) p;
		log_tab[p] = (uint8_t) i;

		p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0);
	}

	log_tab[1] = 0;

	for (i = 0, p = 1; i < 10; ++i) {
		rco_tab[i] = p;

		p = (p << 1) ^ (p & 0x80 ? 0x01b : 0);
	}
	for (i = 0; i < 256; ++i) {
		p = (i ? pow_tab[255 - log_tab[i]] : 0);
		q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2));
		p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2));
		sbx_tab[i] = p;
		isb_tab[p] = (uint8_t) i;
	}
	for (i = 0; i < 256; ++i) {
		p = sbx_tab[i];

		t = p;
		fl_tab[0][i] = t;
		fl_tab[1][i] = rotl (t, 8);
		fl_tab[2][i] = rotl (t, 16);
		fl_tab[3][i] = rotl (t, 24);

		t = ((uint32_t) ff_mult (2, p)) |
		    ((uint32_t) p << 8) |
		    ((uint32_t) p << 16) | ((uint32_t) ff_mult (3, p) << 24);

		ft_tab[0][i] = t;
		ft_tab[1][i] = rotl (t, 8);
		ft_tab[2][i] = rotl (t, 16);
		ft_tab[3][i] = rotl (t, 24);

		p = isb_tab[i];

		t = p;
		il_tab[0][i] = t;
		il_tab[1][i] = rotl (t, 8);
		il_tab[2][i] = rotl (t, 16);
		il_tab[3][i] = rotl (t, 24);

		t = ((uint32_t) ff_mult (14, p)) |
		    ((uint32_t) ff_mult (9, p) << 8) |
		    ((uint32_t) ff_mult (13, p) << 16) |
		    ((uint32_t) ff_mult (11, p) << 24);

		it_tab[0][i] = t;
		it_tab[1][i] = rotl (t, 8);
		it_tab[2][i] = rotl (t, 16);
		it_tab[3][i] = rotl (t, 24);
	}
}
#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
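/* star_x() doubles all four bytes of x in GF(2^8) at once (the xtime
 * operation applied lane-wise); imix_col() below builds the InvMixColumns
 * multiplications by 9, 11, 13 and 14 out of repeated doublings. */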
#define imix_col(y,x)		\
	u = star_x(x);		\
	v = star_x(u);		\
	w = star_x(v);		\
	t = w ^ (x);		\
	(y)  = u ^ v ^ w;	\
	(y) ^= rotr(u ^ t, 8) ^	\
	       rotr(v ^ t, 16) ^ \
	       rotr(t, 24)

/* initialise the key schedule from the user supplied key */

#define loop4(i) \
{   t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i]; \
    t ^= E_KEY[4 * i];     E_KEY[4 * i + 4] = t; \
    t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t; \
    t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t; \
    t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t; \
}
#define loop6(i) \
{   t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i]; \
    t ^= E_KEY[6 * i];     E_KEY[6 * i + 6] = t; \
    t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t; \
    t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t; \
    t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t; \
    t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t; \
    t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t; \
}
#define loop8(i) \
{   t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i]; \
    t ^= E_KEY[8 * i];     E_KEY[8 * i + 8] = t; \
    t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t; \
    t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t; \
    t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t; \
    t  = E_KEY[8 * i + 4] ^ ls_box(t); \
    E_KEY[8 * i + 12] = t; \
    t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t; \
    t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t; \
    t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
}
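/* loop4/loop6/loop8 each perform one iteration of the FIPS-197 key
 * expansion for 128-, 192- and 256-bit keys (Nk = 4, 6, 8); loop8 adds
 * the extra ls_box() applied in the middle of each 8-word stride. */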
/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	         as it's possible that the capability will be
	         added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}
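/* The crypto API only guarantees cra_alignmask alignment for the tfm
 * context; if that is already at least PADLOCK_ALIGNMENT the pointer is
 * used as-is, otherwise it is rounded up so that E, d_data and cword end
 * up on the 16-byte boundaries the engine requires. */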
static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	uint32_t i, t, u, v, w;
	uint32_t P[AES_EXTENDED_KEY_SIZE];
	uint32_t rounds;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	ctx->key_length = key_len;
	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	E_KEY[0] = le32_to_cpu(key[0]);
	E_KEY[1] = le32_to_cpu(key[1]);
	E_KEY[2] = le32_to_cpu(key[2]);
	E_KEY[3] = le32_to_cpu(key[3]);
	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;
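	/* For 16/24/32-byte keys the formulas above give rounds = 10/12/14
	 * and ksize = 0/1/2, which is how the engine encodes AES-128/192/256. */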
	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		return 0;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;
	switch (key_len) {
	case 16:
		t = E_KEY[3];
		for (i = 0; i < 10; ++i)
			loop4 (i);
		break;

	case 24:
		E_KEY[4] = le32_to_cpu(key[4]);
		t = E_KEY[5] = le32_to_cpu(key[5]);
		for (i = 0; i < 8; ++i)
			loop6 (i);
		break;

	case 32:
		E_KEY[4] = le32_to_cpu(key[4]);
		E_KEY[5] = le32_to_cpu(key[5]);
		E_KEY[6] = le32_to_cpu(key[6]);
		t = E_KEY[7] = le32_to_cpu(key[7]);
		for (i = 0; i < 7; ++i)
			loop8 (i);
		break;
	}
	D_KEY[0] = E_KEY[0];
	D_KEY[1] = E_KEY[1];
	D_KEY[2] = E_KEY[2];
	D_KEY[3] = E_KEY[3];

	for (i = 4; i < key_len + 24; ++i) {
		imix_col (D_KEY[i], E_KEY[i]);
	}
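	/* The schedule built above is in the order a software implementation
	 * would use it.  The engine instead wants the last encryption round
	 * key first, followed by the InvMixColumns'ed round keys in reverse
	 * order, so the block below reshuffles D_KEY through P. */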
	/* PadLock needs a different format of the decryption key. */
	rounds = 10 + (key_len - 16) / 4;

	for (i = 0; i < rounds; i++) {
		P[((i + 1) * 4) + 0] = D_KEY[((rounds - i - 1) * 4) + 0];
		P[((i + 1) * 4) + 1] = D_KEY[((rounds - i - 1) * 4) + 1];
		P[((i + 1) * 4) + 2] = D_KEY[((rounds - i - 1) * 4) + 2];
		P[((i + 1) * 4) + 3] = D_KEY[((rounds - i - 1) * 4) + 3];
	}

	P[0] = E_KEY[(rounds * 4) + 0];
	P[1] = E_KEY[(rounds * 4) + 1];
	P[2] = E_KEY[(rounds * 4) + 2];
	P[3] = E_KEY[(rounds * 4) + 3];

	memcpy(D_KEY, P, AES_EXTENDED_KEY_SIZE_B);

	return 0;
}
/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(void)
{
	asm volatile ("pushfl; popfl");
}
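/* Register convention shared by the xcrypt instructions: ESI = source,
 * EDI = destination, EBX = key, EDX = control word, ECX = block count.
 * The pushfl/popfl above makes the engine reload the key material before
 * the next xcrypt is issued, since the ACE caches the last key it used. */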
static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
				  void *control_word)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(1));
}
static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
{
	u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, AES_BLOCK_SIZE);
	padlock_xcrypt(tmp, out, key, cword);
}
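/* The engine may read ahead of the single block it is asked to process;
 * if the input sits in the last AES_BLOCK_SIZE bytes of a page that
 * over-read could touch an unmapped page, so aes_crypt() detects this
 * case and bounces the block through the two-block stack buffer above. */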
static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword)
{
	/* padlock_xcrypt requires at least two blocks of data. */
	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
		       (PAGE_SIZE - 1)))) {
		aes_crypt_copy(in, out, key, cword);
		return;
	}

	padlock_xcrypt(in, out, key, cword);
}
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	if (count == 1) {
		aes_crypt(input, output, key, control_word);
		return;
	}

	asm volatile ("test $1, %%cl;"
		      "je 1f;"
		      "lea -1(%%ecx), %%eax;"
		      "mov $1, %%ecx;"
		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
		      "mov %%eax, %%ecx;"
		      "1:"
		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count)
		      : "ax");
}
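/* When the block count is odd, the asm above first runs xcryptecb on a
 * single block and then on the remaining even number of blocks; an even
 * count is handed to a single rep xcryptecb directly. */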
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}
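/* xcryptcbc chains the blocks itself and leaves the address of the last
 * ciphertext block in EAX, so the updated iv pointer is returned here and
 * copied back into walk.iv by cbc_aes_encrypt(). */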
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
}
static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	padlock_reset_key();

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
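/* All four blkcipher handlers follow the same pattern: blkcipher_walk_virt()
 * maps one contiguous chunk of the scatterlist at a time, the whole chunk is
 * fed to the engine, and the sub-block remainder (nbytes & (AES_BLOCK_SIZE - 1))
 * is handed back to blkcipher_walk_done(). */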
static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	padlock_reset_key();

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-padlock",
	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_aes_encrypt,
			.decrypt	= ecb_aes_decrypt,
		}
	}
};
static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	padlock_reset_key();

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	padlock_reset_key();

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-padlock",
	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_aes_encrypt,
			.decrypt	= cbc_aes_decrypt,
		}
	}
};
static int __init padlock_init(void)
{
	int ret;

	if (!cpu_has_xcrypt) {
		printk(KERN_ERR PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}
static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");