/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;

/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
{
        int i;

        for (i = 0; i < 4; i++)
                iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
        int i;

        for (i = 0; i < 4; i++)
                ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}
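
/*
 * Program one operation into the AES engine: source and destination are
 * passed as physical addresses, the length and control flags are written
 * to the engine registers, and we busy-wait on the interrupt-pending bit.
 * Returns 0 on completion, 1 if the operation timed out.
 */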
static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
        u32 status;
        u32 counter = AES_OP_TIMEOUT;

        iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
        iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
        iowrite32(len, _iobase + AES_LENA_REG);

        /* Start the operation */
        iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

        do {
                status = ioread32(_iobase + AES_INTR_REG);
                cpu_relax();
        } while (!(status & AES_INTRA_PENDING) && --counter);

        /* Clear the event */
        iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
        return counter ? 0 : 1;
}
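
/*
 * Perform one encrypt/decrypt pass with the engine.  The key and (for CBC)
 * the IV are written to the hardware under the spinlock, the operation is
 * started, and in CBC mode the updated IV is read back afterwards so that
 * chained calls continue from the right state.
 */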
static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
        u32 flags = 0;
        unsigned long iflags;
        int ret;

        /* If the source and destination are the same, then
         * we need to turn on the coherent flags, otherwise
         * we don't need to worry
         */

        flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

        if (op->dir == AES_DIR_ENCRYPT)
                flags |= AES_CTRL_ENCRYPT;

        /* Start the critical section */

        spin_lock_irqsave(&lock, iflags);

        if (op->mode == AES_MODE_CBC) {
                flags |= AES_CTRL_CBC;
                _writefield(AES_WRITEIV0_REG, op->iv);
        }

        if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
                flags |= AES_CTRL_WRKEY;
                _writefield(AES_WRITEKEY0_REG, op->key);
        }

        ret = do_crypt(op->src, op->dst, op->len, flags);
        BUG_ON(ret);

        if (op->mode == AES_MODE_CBC)
                _readfield(AES_WRITEIV0_REG, op->iv);

        spin_unlock_irqrestore(&lock, iflags);

        return op->len;
}

/* CRYPTO-API Functions */
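
/*
 * The engine only handles 128-bit keys.  Longer keys are handed to a
 * software fallback transform that is allocated in the cra_init hooks
 * below; the request/result flag bits are propagated between the two tfms.
 */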
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);
        unsigned int ret;

        op->keylen = len;

        if (len == AES_KEYSIZE_128) {
                memcpy(op->key, key, len);
                return 0;
        }

        if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
                /* not supported at all */
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /*
         * The requested key size is not supported by HW, do a fallback
         */
        op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

        ret = crypto_cipher_setkey(op->fallback.cip, key, len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
        }

        return ret;
}

static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);
        unsigned int ret;

        op->keylen = len;

        if (len == AES_KEYSIZE_128) {
                memcpy(op->key, key, len);
                return 0;
        }

        if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
                /* not supported at all */
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /*
         * The requested key size is not supported by HW, do a fallback
         */
        op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

        ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
        }

        return ret;
}
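
/*
 * For the blkcipher fallback paths we temporarily point desc->tfm at the
 * fallback transform, run the whole request through it, and then restore
 * the original tfm before returning.
 */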
static int fallback_blk_dec(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm;
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

        tfm = desc->tfm;
        desc->tfm = op->fallback.blk;

        ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm;
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

        tfm = desc->tfm;
        desc->tfm = op->fallback.blk;

        ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}
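
/*
 * Single-block cipher entry points: 128-bit keys are processed one 16-byte
 * block at a time in ECB mode on the engine, anything else goes through
 * the software fallback cipher.
 */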
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        if (unlikely(op->keylen != AES_KEYSIZE_128)) {
                crypto_cipher_encrypt_one(op->fallback.cip, out, in);
                return;
        }

        op->src = (void *) in;
        op->dst = (void *) out;
        op->mode = AES_MODE_ECB;
        op->flags = 0;
        op->len = AES_MIN_BLOCK_SIZE;
        op->dir = AES_DIR_ENCRYPT;

        geode_aes_crypt(op);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        if (unlikely(op->keylen != AES_KEYSIZE_128)) {
                crypto_cipher_decrypt_one(op->fallback.cip, out, in);
                return;
        }

        op->src = (void *) in;
        op->dst = (void *) out;
        op->mode = AES_MODE_ECB;
        op->flags = 0;
        op->len = AES_MIN_BLOCK_SIZE;
        op->dir = AES_DIR_DECRYPT;

        geode_aes_crypt(op);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        op->fallback.cip = crypto_alloc_cipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(op->fallback.cip)) {
                printk(KERN_ERR "Error allocating fallback algo %s\n", name);
                return PTR_ERR(op->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        crypto_free_cipher(op->fallback.cip);
        op->fallback.cip = NULL;
}

static struct crypto_alg geode_alg = {
        .cra_name = "aes",
        .cra_driver_name = "geode-aes",
        .cra_flags = CRYPTO_ALG_TYPE_CIPHER |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_init = fallback_init_cip,
        .cra_exit = fallback_exit_cip,
        .cra_blocksize = AES_MIN_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct geode_aes_op),
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(geode_alg.cra_list),
        .cra_u = {
                .cipher = {
                        .cia_min_keysize = AES_MIN_KEY_SIZE,
                        .cia_max_keysize = AES_MAX_KEY_SIZE,
                        .cia_setkey = geode_setkey_cip,
                        .cia_encrypt = geode_encrypt,
                        .cia_decrypt = geode_decrypt
                }
        }
};
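
/*
 * CBC and ECB blkcipher implementations: walk the scatterlists, feed the
 * engine whole 16-byte blocks per iteration, and fall back to software for
 * key sizes the hardware cannot handle.
 */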
static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
                  struct scatterlist *dst, struct scatterlist *src,
                  unsigned int nbytes)
{
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err, ret;

        if (unlikely(op->keylen != AES_KEYSIZE_128))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        op->iv = walk.iv;

        while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr;
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_CBC;
                op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
                op->dir = AES_DIR_DECRYPT;

                ret = geode_aes_crypt(op);

                nbytes -= ret;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
                  struct scatterlist *dst, struct scatterlist *src,
                  unsigned int nbytes)
{
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err, ret;

        if (unlikely(op->keylen != AES_KEYSIZE_128))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        op->iv = walk.iv;

        while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr;
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_CBC;
                op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
                op->dir = AES_DIR_ENCRYPT;

                ret = geode_aes_crypt(op);

                nbytes -= ret;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        op->fallback.blk = crypto_alloc_blkcipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(op->fallback.blk)) {
                printk(KERN_ERR "Error allocating fallback algo %s\n", name);
                return PTR_ERR(op->fallback.blk);
        }

        return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(op->fallback.blk);
        op->fallback.blk = NULL;
}

static struct crypto_alg geode_cbc_alg = {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "cbc-aes-geode",
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_KERN_DRIVER_ONLY |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_init = fallback_init_blk,
        .cra_exit = fallback_exit_blk,
        .cra_blocksize = AES_MIN_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct geode_aes_op),
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(geode_cbc_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = geode_setkey_blk,
                        .encrypt = geode_cbc_encrypt,
                        .decrypt = geode_cbc_decrypt,
                        .ivsize = AES_IV_LENGTH,
                }
        }
};

static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
                  struct scatterlist *dst, struct scatterlist *src,
                  unsigned int nbytes)
{
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err, ret;

        if (unlikely(op->keylen != AES_KEYSIZE_128))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr;
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_ECB;
                op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
                op->dir = AES_DIR_DECRYPT;

                ret = geode_aes_crypt(op);

                nbytes -= ret;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
                  struct scatterlist *dst, struct scatterlist *src,
                  unsigned int nbytes)
{
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err, ret;

        if (unlikely(op->keylen != AES_KEYSIZE_128))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr;
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_ECB;
                op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
                op->dir = AES_DIR_ENCRYPT;

                ret = geode_aes_crypt(op);

                nbytes -= ret;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static struct crypto_alg geode_ecb_alg = {
        .cra_name = "ecb(aes)",
        .cra_driver_name = "ecb-aes-geode",
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_KERN_DRIVER_ONLY |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_init = fallback_init_blk,
        .cra_exit = fallback_exit_blk,
        .cra_blocksize = AES_MIN_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct geode_aes_op),
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(geode_ecb_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = geode_setkey_blk,
                        .encrypt = geode_ecb_encrypt,
                        .decrypt = geode_ecb_decrypt,
                }
        }
};

static void __devexit
geode_aes_remove(struct pci_dev *dev)
{
        crypto_unregister_alg(&geode_alg);
        crypto_unregister_alg(&geode_ecb_alg);
        crypto_unregister_alg(&geode_cbc_alg);

        pci_iounmap(dev, _iobase);
        _iobase = NULL;

        pci_release_regions(dev);
        pci_disable_device(dev);
}
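
/*
 * PCI probe: enable the device, map BAR 0 for the AES engine registers,
 * clear any pending interrupt state and register the cipher, ECB and CBC
 * algorithms, unwinding everything in reverse order on failure.
 */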
static int __devinit
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        int ret;

        ret = pci_enable_device(dev);
        if (ret)
                return ret;

        ret = pci_request_regions(dev, "geode-aes");
        if (ret)
                goto eenable;

        _iobase = pci_iomap(dev, 0, 0);

        if (_iobase == NULL) {
                ret = -ENOMEM;
                goto erequest;
        }

        spin_lock_init(&lock);

        /* Clear any pending activity */
        iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

        ret = crypto_register_alg(&geode_alg);
        if (ret)
                goto eiomap;

        ret = crypto_register_alg(&geode_ecb_alg);
        if (ret)
                goto ealg;

        ret = crypto_register_alg(&geode_cbc_alg);
        if (ret)
                goto eecb;

        printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
        return 0;

 eecb:
        crypto_unregister_alg(&geode_ecb_alg);

 ealg:
        crypto_unregister_alg(&geode_alg);

 eiomap:
        pci_iounmap(dev, _iobase);

 erequest:
        pci_release_regions(dev);

 eenable:
        pci_disable_device(dev);

        printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
        return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
        .name = "Geode LX AES",
        .id_table = geode_aes_tbl,
        .probe = geode_aes_probe,
        .remove = __devexit_p(geode_aes_remove)
};

static int __init
geode_aes_init(void)
{
        return pci_register_driver(&geode_aes_driver);
}

static void __exit
geode_aes_exit(void)
{
        pci_unregister_driver(&geode_aes_driver);
}

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");

module_init(geode_aes_init);
module_exit(geode_aes_exit);