/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
 * Copyright (c) 2009,2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 * Copyright (c) 2010 Phil Sutter
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/*
 * Device /dev/crypto provides an interface for
 * accessing kernel CryptoAPI algorithms (ciphers,
 * hashes) from userspace programs.
 *
 * /dev/crypto interface was originally introduced in
 * OpenBSD and this module attempts to keep the API.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/compat.h>
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <crypto/cryptodev.h>
#include <linux/scatterlist.h>
#include "cryptodev_int.h"
52 MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
53 MODULE_DESCRIPTION("CryptoDev driver");
54 MODULE_LICENSE("GPL");
56 /* ====== Compile-time config ====== */
58 /* Default (pre-allocated) and maximum size of the job queue.
59 * These are free, pending and done items all together. */
60 #define DEF_COP_RINGSIZE 16
61 #define MAX_COP_RINGSIZE 64
63 /* ====== Module parameters ====== */
65 int cryptodev_verbosity
;
66 module_param(cryptodev_verbosity
, int, 0644);
67 MODULE_PARM_DESC(cryptodev_verbosity
, "0: normal, 1: verbose, 2: debug");
69 /* ====== CryptoAPI ====== */
70 struct todo_list_item
{
71 struct list_head __hook
;
72 struct kernel_crypt_op kcop
;
77 struct list_head list
;
83 struct locked_list free
, todo
, done
;
85 struct work_struct cryptask
;
86 wait_queue_head_t user_waiter
;
/* Fill a scatterlist entry from a kernel virtual address.
 * NOTE(review): legacy helper; the 'length' assignment below is
 * reconstructed — confirm against the project history. */
#define FILL_SG(sg, ptr, len)					\
	do {							\
		(sg)->page = virt_to_page(ptr);			\
		(sg)->offset = offset_in_page(ptr);		\
		(sg)->length = len;				\
		(sg)->dma_address = 0;				\
	} while (0)
/* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
static struct workqueue_struct *cryptodev_wq;
100 /* Prepare session for future use. */
102 crypto_create_session(struct fcrypt
*fcr
, struct session_op
*sop
)
104 struct csession
*ses_new
= NULL
, *ses_ptr
;
106 const char *alg_name
= NULL
;
107 const char *hash_name
= NULL
;
108 int hmac_mode
= 1, stream
= 0, aead
= 0;
110 /* Does the request make sense? */
111 if (unlikely(!sop
->cipher
&& !sop
->mac
)) {
112 dprintk(1, KERN_DEBUG
, "Both 'cipher' and 'mac' unset.\n");
116 switch (sop
->cipher
) {
120 alg_name
= "cbc(des)";
122 case CRYPTO_3DES_CBC
:
123 alg_name
= "cbc(des3_ede)";
126 alg_name
= "cbc(blowfish)";
129 alg_name
= "cbc(aes)";
132 alg_name
= "ecb(aes)";
134 case CRYPTO_CAMELLIA_CBC
:
135 alg_name
= "cbc(camellia)";
138 alg_name
= "ctr(aes)";
142 alg_name
= "gcm(aes)";
147 alg_name
= "ecb(cipher_null)";
151 dprintk(1, KERN_DEBUG
, "%s: bad cipher: %d\n", __func__
,
159 case CRYPTO_MD5_HMAC
:
160 hash_name
= "hmac(md5)";
162 case CRYPTO_RIPEMD160_HMAC
:
163 hash_name
= "hmac(rmd160)";
165 case CRYPTO_SHA1_HMAC
:
166 hash_name
= "hmac(sha1)";
168 case CRYPTO_SHA2_256_HMAC
:
169 hash_name
= "hmac(sha256)";
171 case CRYPTO_SHA2_384_HMAC
:
172 hash_name
= "hmac(sha384)";
174 case CRYPTO_SHA2_512_HMAC
:
175 hash_name
= "hmac(sha512)";
183 case CRYPTO_RIPEMD160
:
184 hash_name
= "rmd160";
191 case CRYPTO_SHA2_256
:
192 hash_name
= "sha256";
195 case CRYPTO_SHA2_384
:
196 hash_name
= "sha384";
199 case CRYPTO_SHA2_512
:
200 hash_name
= "sha512";
204 dprintk(1, KERN_DEBUG
, "%s: bad mac: %d\n", __func__
,
209 /* Create a session and put it to the list. */
210 ses_new
= kzalloc(sizeof(*ses_new
), GFP_KERNEL
);
214 /* Set-up crypto transform. */
216 uint8_t keyp
[CRYPTO_CIPHER_MAX_KEY_LEN
];
218 if (unlikely(sop
->keylen
> CRYPTO_CIPHER_MAX_KEY_LEN
)) {
219 dprintk(1, KERN_DEBUG
,
220 "Setting key failed for %s-%zu.\n",
221 alg_name
, (size_t)sop
->keylen
*8);
226 if (unlikely(copy_from_user(keyp
, sop
->key
, sop
->keylen
))) {
231 ret
= cryptodev_cipher_init(&ses_new
->cdata
, alg_name
, keyp
,
232 sop
->keylen
, stream
, aead
);
234 dprintk(1, KERN_DEBUG
,
235 "%s: Failed to load cipher for %s\n",
242 if (hash_name
&& aead
== 0) {
243 uint8_t keyp
[CRYPTO_HMAC_MAX_KEY_LEN
];
245 if (unlikely(sop
->mackeylen
> CRYPTO_HMAC_MAX_KEY_LEN
)) {
246 dprintk(1, KERN_DEBUG
,
247 "Setting key failed for %s-%zu.\n",
248 alg_name
, (size_t)sop
->mackeylen
*8);
253 if (sop
->mackey
&& unlikely(copy_from_user(keyp
, sop
->mackey
,
259 ret
= cryptodev_hash_init(&ses_new
->hdata
, hash_name
, hmac_mode
,
260 keyp
, sop
->mackeylen
);
262 dprintk(1, KERN_DEBUG
,
263 "%s: Failed to load hash for %s\n",
264 __func__
, hash_name
);
270 ses_new
->alignmask
= max(ses_new
->cdata
.alignmask
,
271 ses_new
->hdata
.alignmask
);
272 dprintk(2, KERN_DEBUG
, "%s: got alignmask %d\n", __func__
, ses_new
->alignmask
);
274 ses_new
->array_size
= DEFAULT_PREALLOC_PAGES
;
275 dprintk(2, KERN_DEBUG
, "%s: preallocating for %d user pages\n",
276 __func__
, ses_new
->array_size
);
277 ses_new
->pages
= kzalloc(ses_new
->array_size
*
278 sizeof(struct page
*), GFP_KERNEL
);
279 ses_new
->sg
= kzalloc(ses_new
->array_size
*
280 sizeof(struct scatterlist
), GFP_KERNEL
);
281 if (ses_new
->sg
== NULL
|| ses_new
->pages
== NULL
) {
282 dprintk(0, KERN_DEBUG
, "Memory error\n");
287 /* put the new session to the list */
288 get_random_bytes(&ses_new
->sid
, sizeof(ses_new
->sid
));
289 mutex_init(&ses_new
->sem
);
291 mutex_lock(&fcr
->sem
);
293 list_for_each_entry(ses_ptr
, &fcr
->list
, entry
) {
294 /* Check for duplicate SID */
295 if (unlikely(ses_new
->sid
== ses_ptr
->sid
)) {
296 get_random_bytes(&ses_new
->sid
, sizeof(ses_new
->sid
));
297 /* Unless we have a broken RNG this
298 shouldn't loop forever... ;-) */
303 list_add(&ses_new
->entry
, &fcr
->list
);
304 mutex_unlock(&fcr
->sem
);
306 /* Fill in some values for the user. */
307 sop
->ses
= ses_new
->sid
;
312 cryptodev_cipher_deinit(&ses_new
->cdata
);
314 kfree(ses_new
->pages
);
322 /* Everything that needs to be done when remowing a session. */
324 crypto_destroy_session(struct csession
*ses_ptr
)
326 if (!mutex_trylock(&ses_ptr
->sem
)) {
327 dprintk(2, KERN_DEBUG
, "Waiting for semaphore of sid=0x%08X\n",
329 mutex_lock(&ses_ptr
->sem
);
331 dprintk(2, KERN_DEBUG
, "Removed session 0x%08X\n", ses_ptr
->sid
);
332 cryptodev_cipher_deinit(&ses_ptr
->cdata
);
333 cryptodev_hash_deinit(&ses_ptr
->hdata
);
334 dprintk(2, KERN_DEBUG
, "%s: freeing space for %d user pages\n",
335 __func__
, ses_ptr
->array_size
);
336 kfree(ses_ptr
->pages
);
338 mutex_unlock(&ses_ptr
->sem
);
342 /* Look up a session by ID and remove. */
344 crypto_finish_session(struct fcrypt
*fcr
, uint32_t sid
)
346 struct csession
*tmp
, *ses_ptr
;
347 struct list_head
*head
;
350 mutex_lock(&fcr
->sem
);
352 list_for_each_entry_safe(ses_ptr
, tmp
, head
, entry
) {
353 if (ses_ptr
->sid
== sid
) {
354 list_del(&ses_ptr
->entry
);
355 crypto_destroy_session(ses_ptr
);
360 if (unlikely(!ses_ptr
)) {
361 dprintk(1, KERN_ERR
, "Session with sid=0x%08X not found!\n",
365 mutex_unlock(&fcr
->sem
);
370 /* Remove all sessions when closing the file */
372 crypto_finish_all_sessions(struct fcrypt
*fcr
)
374 struct csession
*tmp
, *ses_ptr
;
375 struct list_head
*head
;
377 mutex_lock(&fcr
->sem
);
380 list_for_each_entry_safe(ses_ptr
, tmp
, head
, entry
) {
381 list_del(&ses_ptr
->entry
);
382 crypto_destroy_session(ses_ptr
);
384 mutex_unlock(&fcr
->sem
);
389 /* Look up session by session ID. The returned session is locked. */
391 crypto_get_session_by_sid(struct fcrypt
*fcr
, uint32_t sid
)
393 struct csession
*ses_ptr
, *retval
= NULL
;
395 if (unlikely(fcr
== NULL
))
398 mutex_lock(&fcr
->sem
);
399 list_for_each_entry(ses_ptr
, &fcr
->list
, entry
) {
400 if (ses_ptr
->sid
== sid
) {
401 mutex_lock(&ses_ptr
->sem
);
406 mutex_unlock(&fcr
->sem
);
411 static void cryptask_routine(struct work_struct
*work
)
413 struct crypt_priv
*pcr
= container_of(work
, struct crypt_priv
, cryptask
);
414 struct todo_list_item
*item
;
417 /* fetch all pending jobs into the temporary list */
418 mutex_lock(&pcr
->todo
.lock
);
419 list_cut_position(&tmp
, &pcr
->todo
.list
, pcr
->todo
.list
.prev
);
420 mutex_unlock(&pcr
->todo
.lock
);
422 /* handle each job locklessly */
423 list_for_each_entry(item
, &tmp
, __hook
) {
424 item
->result
= crypto_run(&pcr
->fcrypt
, &item
->kcop
);
425 if (unlikely(item
->result
))
426 dprintk(0, KERN_ERR
, "%s: crypto_run() failed: %d\n",
427 __func__
, item
->result
);
430 /* push all handled jobs to the done list at once */
431 mutex_lock(&pcr
->done
.lock
);
432 list_splice_tail(&tmp
, &pcr
->done
.list
);
433 mutex_unlock(&pcr
->done
.lock
);
435 /* wake for POLLIN */
436 wake_up_interruptible(&pcr
->user_waiter
);
439 /* ====== /dev/crypto ====== */
442 cryptodev_open(struct inode
*inode
, struct file
*filp
)
444 struct todo_list_item
*tmp
;
445 struct crypt_priv
*pcr
;
448 pcr
= kmalloc(sizeof(*pcr
), GFP_KERNEL
);
452 memset(pcr
, 0, sizeof(*pcr
));
453 mutex_init(&pcr
->fcrypt
.sem
);
454 INIT_LIST_HEAD(&pcr
->fcrypt
.list
);
456 INIT_LIST_HEAD(&pcr
->free
.list
);
457 INIT_LIST_HEAD(&pcr
->todo
.list
);
458 INIT_LIST_HEAD(&pcr
->done
.list
);
459 INIT_WORK(&pcr
->cryptask
, cryptask_routine
);
460 mutex_init(&pcr
->free
.lock
);
461 mutex_init(&pcr
->todo
.lock
);
462 mutex_init(&pcr
->done
.lock
);
463 init_waitqueue_head(&pcr
->user_waiter
);
465 for (i
= 0; i
< DEF_COP_RINGSIZE
; i
++) {
466 tmp
= kzalloc(sizeof(struct todo_list_item
), GFP_KERNEL
);
468 dprintk(2, KERN_DEBUG
, "%s: allocated new item at %lx\n",
469 __func__
, (unsigned long)tmp
);
470 list_add(&tmp
->__hook
, &pcr
->free
.list
);
473 filp
->private_data
= pcr
;
474 dprintk(2, KERN_DEBUG
,
475 "Cryptodev handle initialised, %d elements in queue\n",
481 cryptodev_release(struct inode
*inode
, struct file
*filp
)
483 struct crypt_priv
*pcr
= filp
->private_data
;
484 struct todo_list_item
*item
, *item_safe
;
490 cancel_work_sync(&pcr
->cryptask
);
492 mutex_destroy(&pcr
->todo
.lock
);
493 mutex_destroy(&pcr
->done
.lock
);
494 mutex_destroy(&pcr
->free
.lock
);
496 list_splice_tail(&pcr
->todo
.list
, &pcr
->free
.list
);
497 list_splice_tail(&pcr
->done
.list
, &pcr
->free
.list
);
499 list_for_each_entry_safe(item
, item_safe
, &pcr
->free
.list
, __hook
) {
500 dprintk(2, KERN_DEBUG
, "%s: freeing item at %lx\n",
501 __func__
, (unsigned long)item
);
502 list_del(&item
->__hook
);
507 if (items_freed
!= pcr
->itemcount
) {
509 "%s: freed %d items, but %d should exist!\n",
510 __func__
, items_freed
, pcr
->itemcount
);
513 crypto_finish_all_sessions(&pcr
->fcrypt
);
515 filp
->private_data
= NULL
;
517 dprintk(2, KERN_DEBUG
,
518 "Cryptodev handle deinitialised, %d elements freed\n",
/* Duplicate filp into a fresh file descriptor (used by CRIOGET).
 * Takes an extra reference on filp before installing it, since the
 * new fd owns a reference of its own. */
static int
clonefd(struct file *filp)
{
	int ret;

	ret = get_unused_fd();
	if (ret >= 0) {
		get_file(filp);
		fd_install(ret, filp);
	}

	return ret;
}
536 /* enqueue a job for asynchronous completion
539 * -EBUSY when there are no free queue slots left
540 * (and the number of slots has reached it MAX_COP_RINGSIZE)
541 * -EFAULT when there was a memory allocation error
543 static int crypto_async_run(struct crypt_priv
*pcr
, struct kernel_crypt_op
*kcop
)
545 struct todo_list_item
*item
= NULL
;
547 mutex_lock(&pcr
->free
.lock
);
548 if (likely(!list_empty(&pcr
->free
.list
))) {
549 item
= list_first_entry(&pcr
->free
.list
,
550 struct todo_list_item
, __hook
);
551 list_del(&item
->__hook
);
552 } else if (pcr
->itemcount
< MAX_COP_RINGSIZE
) {
555 mutex_unlock(&pcr
->free
.lock
);
558 mutex_unlock(&pcr
->free
.lock
);
560 if (unlikely(!item
)) {
561 item
= kzalloc(sizeof(struct todo_list_item
), GFP_KERNEL
);
564 dprintk(1, KERN_INFO
, "%s: increased item count to %d\n",
565 __func__
, pcr
->itemcount
);
568 memcpy(&item
->kcop
, kcop
, sizeof(struct kernel_crypt_op
));
570 mutex_lock(&pcr
->todo
.lock
);
571 list_add_tail(&item
->__hook
, &pcr
->todo
.list
);
572 mutex_unlock(&pcr
->todo
.lock
);
574 queue_work(cryptodev_wq
, &pcr
->cryptask
);
578 /* get the first completed job from the "done" queue
581 * -EBUSY if no completed jobs are ready (yet)
582 * the return value of crypto_run() otherwise */
583 static int crypto_async_fetch(struct crypt_priv
*pcr
,
584 struct kernel_crypt_op
*kcop
)
586 struct todo_list_item
*item
;
589 mutex_lock(&pcr
->done
.lock
);
590 if (list_empty(&pcr
->done
.list
)) {
591 mutex_unlock(&pcr
->done
.lock
);
594 item
= list_first_entry(&pcr
->done
.list
, struct todo_list_item
, __hook
);
595 list_del(&item
->__hook
);
596 mutex_unlock(&pcr
->done
.lock
);
598 memcpy(kcop
, &item
->kcop
, sizeof(struct kernel_crypt_op
));
599 retval
= item
->result
;
601 mutex_lock(&pcr
->free
.lock
);
602 list_add_tail(&item
->__hook
, &pcr
->free
.list
);
603 mutex_unlock(&pcr
->free
.lock
);
605 /* wake for POLLOUT */
606 wake_up_interruptible(&pcr
->user_waiter
);
611 /* this function has to be called from process context */
612 static int fill_kcop_from_cop(struct kernel_crypt_op
*kcop
, struct fcrypt
*fcr
)
614 struct crypt_op
*cop
= &kcop
->cop
;
615 struct csession
*ses_ptr
;
618 /* this also enters ses_ptr->sem */
619 ses_ptr
= crypto_get_session_by_sid(fcr
, cop
->ses
);
620 if (unlikely(!ses_ptr
)) {
621 dprintk(1, KERN_ERR
, "invalid session ID=0x%08X\n", cop
->ses
);
624 kcop
->ivlen
= cop
->iv
? ses_ptr
->cdata
.ivsize
: 0;
625 kcop
->digestsize
= 0; /* will be updated during operation */
627 crypto_put_session(ses_ptr
);
629 kcop
->task
= current
;
630 kcop
->mm
= current
->mm
;
633 rc
= copy_from_user(kcop
->iv
, cop
->iv
, kcop
->ivlen
);
636 "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
637 kcop
->ivlen
, rc
, (unsigned long)cop
->iv
);
645 /* this function has to be called from process context */
646 static int fill_cop_from_kcop(struct kernel_crypt_op
*kcop
, struct fcrypt
*fcr
)
650 if (kcop
->digestsize
) {
651 ret
= copy_to_user(kcop
->cop
.mac
,
652 kcop
->hash_output
, kcop
->digestsize
);
656 if (kcop
->ivlen
&& kcop
->cop
.flags
& COP_FLAG_WRITE_IV
) {
657 ret
= copy_to_user(kcop
->cop
.iv
,
658 kcop
->iv
, kcop
->ivlen
);
665 static int kcop_from_user(struct kernel_crypt_op
*kcop
,
666 struct fcrypt
*fcr
, void __user
*arg
)
668 if (unlikely(copy_from_user(&kcop
->cop
, arg
, sizeof(kcop
->cop
))))
671 return fill_kcop_from_cop(kcop
, fcr
);
674 static int kcop_to_user(struct kernel_crypt_op
*kcop
,
675 struct fcrypt
*fcr
, void __user
*arg
)
679 ret
= fill_cop_from_kcop(kcop
, fcr
);
681 dprintk(1, KERN_ERR
, "Error in fill_cop_from_kcop\n");
685 if (unlikely(copy_to_user(arg
, &kcop
->cop
, sizeof(kcop
->cop
)))) {
686 dprintk(1, KERN_ERR
, "Cannot copy to userspace\n");
692 static inline void tfm_info_to_alg_info(struct alg_info
*dst
, struct crypto_tfm
*tfm
)
694 snprintf(dst
->cra_name
, CRYPTODEV_MAX_ALG_NAME
,
695 "%s", crypto_tfm_alg_name(tfm
));
696 snprintf(dst
->cra_driver_name
, CRYPTODEV_MAX_ALG_NAME
,
697 "%s", crypto_tfm_alg_driver_name(tfm
));
700 static unsigned int is_known_accelerated(struct crypto_tfm
*tfm
)
702 const char* name
= crypto_tfm_alg_driver_name(tfm
);
705 return 1; /* assume accelerated */
707 if (strstr(name
, "-talitos"))
709 else if (strncmp(name
, "mv-", 3))
711 else if (strstr(name
, "geode"))
713 else if (strstr(name
, "hifn"))
715 else if (strstr(name
, "-ixp4xx"))
717 else if (strstr(name
, "-omap"))
719 else if (strstr(name
, "-picoxcell"))
721 else if (strstr(name
, "-s5p"))
723 else if (strstr(name
, "-ppc4xx"))
725 else if (strstr(name
, "-caam"))
727 else if (strstr(name
, "-n2"))
733 static int get_session_info(struct fcrypt
*fcr
, struct session_info_op
*siop
)
735 struct csession
*ses_ptr
;
736 struct crypto_tfm
*tfm
;
738 /* this also enters ses_ptr->sem */
739 ses_ptr
= crypto_get_session_by_sid(fcr
, siop
->ses
);
740 if (unlikely(!ses_ptr
)) {
741 dprintk(1, KERN_ERR
, "invalid session ID=0x%08X\n", siop
->ses
);
747 if (ses_ptr
->cdata
.init
) {
748 if (ses_ptr
->cdata
.aead
== 0) {
749 tfm
= crypto_ablkcipher_tfm(ses_ptr
->cdata
.async
.s
);
751 tfm
= crypto_aead_tfm(ses_ptr
->cdata
.async
.as
);
753 tfm_info_to_alg_info(&siop
->cipher_info
, tfm
);
754 #ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
755 if (tfm
->__crt_alg
->cra_flags
& CRYPTO_ALG_KERN_DRIVER_ONLY
)
756 siop
->flags
|= SIOP_FLAG_KERNEL_DRIVER_ONLY
;
758 if (is_known_accelerated(tfm
))
759 siop
->flags
|= SIOP_FLAG_KERNEL_DRIVER_ONLY
;
762 if (ses_ptr
->hdata
.init
) {
763 tfm
= crypto_ahash_tfm(ses_ptr
->hdata
.async
.s
);
764 tfm_info_to_alg_info(&siop
->hash_info
, tfm
);
765 #ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
766 if (tfm
->__crt_alg
->cra_flags
& CRYPTO_ALG_KERN_DRIVER_ONLY
)
767 siop
->flags
|= SIOP_FLAG_KERNEL_DRIVER_ONLY
;
769 if (is_known_accelerated(tfm
))
770 siop
->flags
|= SIOP_FLAG_KERNEL_DRIVER_ONLY
;
774 siop
->alignmask
= ses_ptr
->alignmask
;
776 crypto_put_session(ses_ptr
);
781 cryptodev_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg_
)
783 void __user
*arg
= (void __user
*)arg_
;
785 struct session_op sop
;
786 struct kernel_crypt_op kcop
;
787 struct kernel_crypt_auth_op kcaop
;
788 struct crypt_priv
*pcr
= filp
->private_data
;
790 struct session_info_op siop
;
801 return put_user(0, p
);
804 ret
= put_user(fd
, p
);
811 if (unlikely(copy_from_user(&sop
, arg
, sizeof(sop
))))
814 ret
= crypto_create_session(fcr
, &sop
);
817 ret
= copy_to_user(arg
, &sop
, sizeof(sop
));
819 crypto_finish_session(fcr
, sop
.ses
);
824 ret
= get_user(ses
, (uint32_t __user
*)arg
);
827 ret
= crypto_finish_session(fcr
, ses
);
830 if (unlikely(copy_from_user(&siop
, arg
, sizeof(siop
))))
833 ret
= get_session_info(fcr
, &siop
);
836 return copy_to_user(arg
, &siop
, sizeof(siop
));
838 if (unlikely(ret
= kcop_from_user(&kcop
, fcr
, arg
))) {
839 dprintk(1, KERN_WARNING
, "Error copying from user");
843 ret
= crypto_run(fcr
, &kcop
);
845 dprintk(1, KERN_WARNING
, "Error in crypto_run");
849 return kcop_to_user(&kcop
, fcr
, arg
);
851 if (unlikely(ret
= kcaop_from_user(&kcaop
, fcr
, arg
))) {
852 dprintk(1, KERN_WARNING
, "Error copying from user");
856 ret
= crypto_auth_run(fcr
, &kcaop
);
858 dprintk(1, KERN_WARNING
, "Error in crypto_auth_run");
861 return kcaop_to_user(&kcaop
, fcr
, arg
);
863 if (unlikely(ret
= kcop_from_user(&kcop
, fcr
, arg
)))
866 return crypto_async_run(pcr
, &kcop
);
868 ret
= crypto_async_fetch(pcr
, &kcop
);
872 return kcop_to_user(&kcop
, fcr
, arg
);
878 /* compatibility code for 32bit userlands */
882 compat_to_session_op(struct compat_session_op
*compat
, struct session_op
*sop
)
884 sop
->cipher
= compat
->cipher
;
885 sop
->mac
= compat
->mac
;
886 sop
->keylen
= compat
->keylen
;
888 sop
->key
= compat_ptr(compat
->key
);
889 sop
->mackeylen
= compat
->mackeylen
;
890 sop
->mackey
= compat_ptr(compat
->mackey
);
891 sop
->ses
= compat
->ses
;
895 session_op_to_compat(struct session_op
*sop
, struct compat_session_op
*compat
)
897 compat
->cipher
= sop
->cipher
;
898 compat
->mac
= sop
->mac
;
899 compat
->keylen
= sop
->keylen
;
901 compat
->key
= ptr_to_compat(sop
->key
);
902 compat
->mackeylen
= sop
->mackeylen
;
903 compat
->mackey
= ptr_to_compat(sop
->mackey
);
904 compat
->ses
= sop
->ses
;
908 compat_to_crypt_op(struct compat_crypt_op
*compat
, struct crypt_op
*cop
)
910 cop
->ses
= compat
->ses
;
911 cop
->op
= compat
->op
;
912 cop
->flags
= compat
->flags
;
913 cop
->len
= compat
->len
;
915 cop
->src
= compat_ptr(compat
->src
);
916 cop
->dst
= compat_ptr(compat
->dst
);
917 cop
->mac
= compat_ptr(compat
->mac
);
918 cop
->iv
= compat_ptr(compat
->iv
);
922 crypt_op_to_compat(struct crypt_op
*cop
, struct compat_crypt_op
*compat
)
924 compat
->ses
= cop
->ses
;
925 compat
->op
= cop
->op
;
926 compat
->flags
= cop
->flags
;
927 compat
->len
= cop
->len
;
929 compat
->src
= ptr_to_compat(cop
->src
);
930 compat
->dst
= ptr_to_compat(cop
->dst
);
931 compat
->mac
= ptr_to_compat(cop
->mac
);
932 compat
->iv
= ptr_to_compat(cop
->iv
);
935 static int compat_kcop_from_user(struct kernel_crypt_op
*kcop
,
936 struct fcrypt
*fcr
, void __user
*arg
)
938 struct compat_crypt_op compat_cop
;
940 if (unlikely(copy_from_user(&compat_cop
, arg
, sizeof(compat_cop
))))
942 compat_to_crypt_op(&compat_cop
, &kcop
->cop
);
944 return fill_kcop_from_cop(kcop
, fcr
);
947 static int compat_kcop_to_user(struct kernel_crypt_op
*kcop
,
948 struct fcrypt
*fcr
, void __user
*arg
)
951 struct compat_crypt_op compat_cop
;
953 ret
= fill_cop_from_kcop(kcop
, fcr
);
955 dprintk(1, KERN_WARNING
, "Error in fill_cop_from_kcop");
958 crypt_op_to_compat(&kcop
->cop
, &compat_cop
);
960 if (unlikely(copy_to_user(arg
, &compat_cop
, sizeof(compat_cop
)))) {
961 dprintk(1, KERN_WARNING
, "Error copying to user");
968 cryptodev_compat_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg_
)
970 void __user
*arg
= (void __user
*)arg_
;
971 struct crypt_priv
*pcr
= file
->private_data
;
973 struct session_op sop
;
974 struct compat_session_op compat_sop
;
975 struct kernel_crypt_op kcop
;
988 return cryptodev_ioctl(file
, cmd
, arg_
);
990 case COMPAT_CIOCGSESSION
:
991 if (unlikely(copy_from_user(&compat_sop
, arg
,
992 sizeof(compat_sop
))))
994 compat_to_session_op(&compat_sop
, &sop
);
996 ret
= crypto_create_session(fcr
, &sop
);
1000 session_op_to_compat(&sop
, &compat_sop
);
1001 ret
= copy_to_user(arg
, &compat_sop
, sizeof(compat_sop
));
1002 if (unlikely(ret
)) {
1003 crypto_finish_session(fcr
, sop
.ses
);
1008 case COMPAT_CIOCCRYPT
:
1009 ret
= compat_kcop_from_user(&kcop
, fcr
, arg
);
1013 ret
= crypto_run(fcr
, &kcop
);
1017 return compat_kcop_to_user(&kcop
, fcr
, arg
);
1018 case COMPAT_CIOCASYNCCRYPT
:
1019 if (unlikely(ret
= compat_kcop_from_user(&kcop
, fcr
, arg
)))
1022 return crypto_async_run(pcr
, &kcop
);
1023 case COMPAT_CIOCASYNCFETCH
:
1024 ret
= crypto_async_fetch(pcr
, &kcop
);
1028 return compat_kcop_to_user(&kcop
, fcr
, arg
);
1035 #endif /* CONFIG_COMPAT */
1037 static unsigned int cryptodev_poll(struct file
*file
, poll_table
*wait
)
1039 struct crypt_priv
*pcr
= file
->private_data
;
1042 poll_wait(file
, &pcr
->user_waiter
, wait
);
1044 if (!list_empty_careful(&pcr
->done
.list
))
1045 ret
|= POLLIN
| POLLRDNORM
;
1046 if (!list_empty_careful(&pcr
->free
.list
) || pcr
->itemcount
< MAX_COP_RINGSIZE
)
1047 ret
|= POLLOUT
| POLLWRNORM
;
1052 static const struct file_operations cryptodev_fops
= {
1053 .owner
= THIS_MODULE
,
1054 .open
= cryptodev_open
,
1055 .release
= cryptodev_release
,
1056 .unlocked_ioctl
= cryptodev_ioctl
,
1057 #ifdef CONFIG_COMPAT
1058 .compat_ioctl
= cryptodev_compat_ioctl
,
1059 #endif /* CONFIG_COMPAT */
1060 .poll
= cryptodev_poll
,
1063 static struct miscdevice cryptodev
= {
1064 .minor
= MISC_DYNAMIC_MINOR
,
1066 .fops
= &cryptodev_fops
,
1067 .mode
= S_IRUSR
|S_IWUSR
|S_IRGRP
|S_IWGRP
|S_IROTH
|S_IWOTH
,
1071 cryptodev_register(void)
1075 rc
= misc_register(&cryptodev
);
1077 printk(KERN_ERR PFX
"registration of /dev/crypto failed\n");
1085 cryptodev_deregister(void)
1087 misc_deregister(&cryptodev
);
1090 /* ====== Module init/exit ====== */
1091 static int __init
init_cryptodev(void)
1095 cryptodev_wq
= create_workqueue("cryptodev_queue");
1096 if (unlikely(!cryptodev_wq
)) {
1097 printk(KERN_ERR PFX
"failed to allocate the cryptodev workqueue\n");
1101 rc
= cryptodev_register();
1103 destroy_workqueue(cryptodev_wq
);
1107 printk(KERN_INFO PFX
"driver %s loaded.\n", VERSION
);
1112 static void __exit
exit_cryptodev(void)
1114 flush_workqueue(cryptodev_wq
);
1115 destroy_workqueue(cryptodev_wq
);
1117 cryptodev_deregister();
1118 printk(KERN_INFO PFX
"driver unloaded.\n");
module_init(init_cryptodev);
module_exit(exit_cryptodev);