2 * Driver for /dev/crypto device (aka CryptoDev)
4 * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
5 * Copyright (c) 2009,2010 Nikos Mavrogiannopoulos <nmav@gnutls.org>
7 * This file is part of linux cryptodev.
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
26 * Device /dev/crypto provides an interface for
27 * accessing kernel CryptoAPI algorithms (ciphers,
28 * hashes) from userspace programs.
30 * /dev/crypto interface was originally introduced in
31 * OpenBSD and this module attempts to keep the API.
35 #include <crypto/hash.h>
36 #include <linux/crypto.h>
38 #include <linux/highmem.h>
39 #include <linux/ioctl.h>
40 #include <linux/random.h>
41 #include <linux/syscalls.h>
42 #include <linux/pagemap.h>
43 #include <linux/poll.h>
44 #include <linux/uaccess.h>
45 #include <crypto/cryptodev.h>
46 #include <linux/scatterlist.h>
47 #include "cryptodev_int.h"
50 MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
51 MODULE_DESCRIPTION("CryptoDev driver");
52 MODULE_LICENSE("GPL");
54 /* ====== Compile-time config ====== */
56 /* Default (pre-allocated) and maximum size of the job queue.
57 * These are free, pending and done items all together. */
58 #define DEF_COP_RINGSIZE 16
59 #define MAX_COP_RINGSIZE 64
61 /* ====== Module parameters ====== */
63 int cryptodev_verbosity
;
64 module_param(cryptodev_verbosity
, int, 0644);
65 MODULE_PARM_DESC(cryptodev_verbosity
, "0: normal, 1: verbose, 2: debug");
67 #ifdef CRYPTODEV_STATS
68 static int enable_stats
;
69 module_param(enable_stats
, int, 0644);
70 MODULE_PARM_DESC(enable_stats
, "collect statictics about cryptodev usage");
73 /* ====== CryptoAPI ====== */
/* One asynchronous job for the CIOCASYNC* ioctls: linked into the per-file
 * free/todo/done lists via __hook and carrying its request in kcop.
 * NOTE(review): the embedded original line numbers jump (74..76, 81, 87..90),
 * so this extraction is missing lines here — e.g. the item's result field,
 * struct locked_list's mutex member, struct crypt_priv's fcrypt/itemcount
 * members and the structs' closing braces. Verify against the full file. */
74 struct todo_list_item
{
75 struct list_head __hook
;
76 struct kernel_crypt_op kcop
;
/* (fragment of struct locked_list: a list_head guarded by a lock) */
81 struct list_head list
;
/* (fragment of struct crypt_priv: per-open-file state — job rings,
 * the work item that drains them, and the waitqueue poll() sleeps on) */
87 struct locked_list free
, todo
, done
;
89 struct work_struct cryptask
;
90 wait_queue_head_t user_waiter
;
/* Fill one scatterlist entry straight from a kernel virtual address.
 * NOTE(review): original lines 94, 97 and 99-100 are missing from this
 * extraction (likely the length assignment and a do{}while(0) wrapper),
 * so the macro as shown is incomplete — confirm against the full file. */
93 #define FILL_SG(sg, ptr, len) \
95 (sg)->page = virt_to_page(ptr); \
96 (sg)->offset = offset_in_page(ptr); \
98 (sg)->dma_address = 0; \
101 /* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
102 static struct workqueue_struct
*cryptodev_wq
;
104 /* Prepare session for future use. */
106 crypto_create_session(struct fcrypt
*fcr
, struct session_op
*sop
)
108 struct csession
*ses_new
= NULL
, *ses_ptr
;
110 const char *alg_name
= NULL
;
111 const char *hash_name
= NULL
;
112 int hmac_mode
= 1, stream
= 0;
114 /* Does the request make sense? */
115 if (unlikely(!sop
->cipher
&& !sop
->mac
)) {
116 dprintk(1, KERN_DEBUG
, "Both 'cipher' and 'mac' unset.\n");
120 switch (sop
->cipher
) {
124 alg_name
= "cbc(des)";
126 case CRYPTO_3DES_CBC
:
127 alg_name
= "cbc(des3_ede)";
130 alg_name
= "cbc(blowfish)";
133 alg_name
= "cbc(aes)";
136 alg_name
= "ecb(aes)";
138 case CRYPTO_CAMELLIA_CBC
:
139 alg_name
= "cbc(camelia)";
142 alg_name
= "ctr(aes)";
146 alg_name
= "ecb(cipher_null)";
150 dprintk(1, KERN_DEBUG
, "%s: bad cipher: %d\n", __func__
,
158 case CRYPTO_MD5_HMAC
:
159 hash_name
= "hmac(md5)";
161 case CRYPTO_RIPEMD160_HMAC
:
162 hash_name
= "hmac(rmd160)";
164 case CRYPTO_SHA1_HMAC
:
165 hash_name
= "hmac(sha1)";
167 case CRYPTO_SHA2_256_HMAC
:
168 hash_name
= "hmac(sha256)";
170 case CRYPTO_SHA2_384_HMAC
:
171 hash_name
= "hmac(sha384)";
173 case CRYPTO_SHA2_512_HMAC
:
174 hash_name
= "hmac(sha512)";
182 case CRYPTO_RIPEMD160
:
183 hash_name
= "rmd160";
190 case CRYPTO_SHA2_256
:
191 hash_name
= "sha256";
194 case CRYPTO_SHA2_384
:
195 hash_name
= "sha384";
198 case CRYPTO_SHA2_512
:
199 hash_name
= "sha512";
204 dprintk(1, KERN_DEBUG
, "%s: bad mac: %d\n", __func__
,
209 /* Create a session and put it to the list. */
210 ses_new
= kzalloc(sizeof(*ses_new
), GFP_KERNEL
);
214 /* Set-up crypto transform. */
216 uint8_t keyp
[CRYPTO_CIPHER_MAX_KEY_LEN
];
218 if (unlikely(sop
->keylen
> CRYPTO_CIPHER_MAX_KEY_LEN
)) {
219 dprintk(1, KERN_DEBUG
,
220 "Setting key failed for %s-%zu.\n",
221 alg_name
, (size_t)sop
->keylen
*8);
226 if (unlikely(copy_from_user(keyp
, sop
->key
, sop
->keylen
))) {
231 ret
= cryptodev_cipher_init(&ses_new
->cdata
, alg_name
, keyp
,
232 sop
->keylen
, stream
);
234 dprintk(1, KERN_DEBUG
,
235 "%s: Failed to load cipher for %s\n",
243 uint8_t keyp
[CRYPTO_HMAC_MAX_KEY_LEN
];
245 if (unlikely(sop
->mackeylen
> CRYPTO_HMAC_MAX_KEY_LEN
)) {
246 dprintk(1, KERN_DEBUG
,
247 "Setting key failed for %s-%zu.\n",
248 alg_name
, (size_t)sop
->mackeylen
*8);
253 if (sop
->mackey
&& unlikely(copy_from_user(keyp
, sop
->mackey
,
259 ret
= cryptodev_hash_init(&ses_new
->hdata
, hash_name
, hmac_mode
,
260 keyp
, sop
->mackeylen
);
262 dprintk(1, KERN_DEBUG
,
263 "%s: Failed to load hash for %s\n",
264 __func__
, hash_name
);
270 ses_new
->alignmask
= max(ses_new
->cdata
.alignmask
,
271 ses_new
->hdata
.alignmask
);
272 dprintk(2, KERN_DEBUG
, "%s: got alignmask %d\n", __func__
, ses_new
->alignmask
);
274 ses_new
->array_size
= DEFAULT_PREALLOC_PAGES
;
275 dprintk(2, KERN_DEBUG
, "%s: preallocating for %d user pages\n",
276 __func__
, ses_new
->array_size
);
277 ses_new
->pages
= kzalloc(ses_new
->array_size
*
278 sizeof(struct page
*), GFP_KERNEL
);
279 ses_new
->sg
= kzalloc(ses_new
->array_size
*
280 sizeof(struct scatterlist
), GFP_KERNEL
);
281 if (ses_new
->sg
== NULL
|| ses_new
->pages
== NULL
) {
282 dprintk(0, KERN_DEBUG
, "Memory error\n");
287 /* put the new session to the list */
288 get_random_bytes(&ses_new
->sid
, sizeof(ses_new
->sid
));
289 mutex_init(&ses_new
->sem
);
291 mutex_lock(&fcr
->sem
);
293 list_for_each_entry(ses_ptr
, &fcr
->list
, entry
) {
294 /* Check for duplicate SID */
295 if (unlikely(ses_new
->sid
== ses_ptr
->sid
)) {
296 get_random_bytes(&ses_new
->sid
, sizeof(ses_new
->sid
));
297 /* Unless we have a broken RNG this
298 shouldn't loop forever... ;-) */
303 list_add(&ses_new
->entry
, &fcr
->list
);
304 mutex_unlock(&fcr
->sem
);
306 /* Fill in some values for the user. */
307 sop
->ses
= ses_new
->sid
;
312 cryptodev_cipher_deinit(&ses_new
->cdata
);
314 kfree(ses_new
->pages
);
322 /* Everything that needs to be done when removing a session. */
324 crypto_destroy_session(struct csession
*ses_ptr
)
326 if (!mutex_trylock(&ses_ptr
->sem
)) {
327 dprintk(2, KERN_DEBUG
, "Waiting for semaphore of sid=0x%08X\n",
329 mutex_lock(&ses_ptr
->sem
);
331 dprintk(2, KERN_DEBUG
, "Removed session 0x%08X\n", ses_ptr
->sid
);
332 #if defined(CRYPTODEV_STATS)
334 dprintk(2, KERN_DEBUG
,
335 "Usage in Bytes: enc=%llu, dec=%llu, "
336 "max=%zu, avg=%lu, cnt=%zu\n",
337 ses_ptr
->stat
[COP_ENCRYPT
], ses_ptr
->stat
[COP_DECRYPT
],
338 ses_ptr
->stat_max_size
, ses_ptr
->stat_count
> 0
339 ? ((unsigned long)(ses_ptr
->stat
[COP_ENCRYPT
]+
340 ses_ptr
->stat
[COP_DECRYPT
]) /
341 ses_ptr
->stat_count
) : 0,
342 ses_ptr
->stat_count
);
344 cryptodev_cipher_deinit(&ses_ptr
->cdata
);
345 cryptodev_hash_deinit(&ses_ptr
->hdata
);
346 dprintk(2, KERN_DEBUG
, "%s: freeing space for %d user pages\n",
347 __func__
, ses_ptr
->array_size
);
348 kfree(ses_ptr
->pages
);
350 mutex_unlock(&ses_ptr
->sem
);
354 /* Look up a session by ID and remove. */
/* Removes the session with matching sid from fcr->list (under fcr->sem)
 * and destroys it via crypto_destroy_session().
 * NOTE(review): this extraction is missing lines (embedded numbering jumps
 * 359->362, 367->372, 373->377): the "head = &fcr->list;" initialisation,
 * the loop's break/ret handling, return type/value and braces are not
 * visible here — as shown, 'head' would be used uninitialised. Verify
 * against the complete cryptodev_main.c before changing anything. */
356 crypto_finish_session(struct fcrypt
*fcr
, uint32_t sid
)
358 struct csession
*tmp
, *ses_ptr
;
359 struct list_head
*head
;
362 mutex_lock(&fcr
->sem
);
364 list_for_each_entry_safe(ses_ptr
, tmp
, head
, entry
) {
365 if (ses_ptr
->sid
== sid
) {
366 list_del(&ses_ptr
->entry
);
367 crypto_destroy_session(ses_ptr
);
/* Reached when no matching sid was found in the list. */
372 if (unlikely(!ses_ptr
)) {
373 dprintk(1, KERN_ERR
, "Session with sid=0x%08X not found!\n",
377 mutex_unlock(&fcr
->sem
);
382 /* Remove all sessions when closing the file */
/* Walks fcr->list under fcr->sem and destroys every remaining session.
 * Called from the file release path.
 * NOTE(review): the extraction skips lines here (e.g. embedded 390-391):
 * the "head = &fcr->list;" initialisation and the closing braces are not
 * visible — as shown, 'head' would be used uninitialised. Verify against
 * the complete file. */
384 crypto_finish_all_sessions(struct fcrypt
*fcr
)
386 struct csession
*tmp
, *ses_ptr
;
387 struct list_head
*head
;
389 mutex_lock(&fcr
->sem
);
392 list_for_each_entry_safe(ses_ptr
, tmp
, head
, entry
) {
393 list_del(&ses_ptr
->entry
);
394 crypto_destroy_session(ses_ptr
);
396 mutex_unlock(&fcr
->sem
);
401 /* Look up session by session ID. The returned session is locked. */
/* Scans fcr->list under fcr->sem; on a sid match, takes the session's own
 * sem before the caller gets it (caller must crypto_put_session()).
 * NOTE(review): 'retval = 0' should idiomatically be NULL for a pointer.
 * The extraction is missing lines 411-414 (presumably "retval = ses_ptr;
 * break;"), the loop/function closing braces and "return retval;" —
 * verify against the complete file. */
403 crypto_get_session_by_sid(struct fcrypt
*fcr
, uint32_t sid
)
405 struct csession
*ses_ptr
, *retval
= 0;
407 mutex_lock(&fcr
->sem
);
408 list_for_each_entry(ses_ptr
, &fcr
->list
, entry
) {
409 if (ses_ptr
->sid
== sid
) {
410 mutex_lock(&ses_ptr
->sem
);
415 mutex_unlock(&fcr
->sem
);
421 hash_n_crypt(struct csession
*ses_ptr
, struct crypt_op
*cop
,
422 struct scatterlist
*src_sg
, struct scatterlist
*dst_sg
,
427 /* Always hash before encryption and after decryption. Maybe
428 * we should introduce a flag to switch... TBD later on.
430 if (cop
->op
== COP_ENCRYPT
) {
431 if (ses_ptr
->hdata
.init
!= 0) {
432 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
437 if (ses_ptr
->cdata
.init
!= 0) {
438 ret
= cryptodev_cipher_encrypt(&ses_ptr
->cdata
,
439 src_sg
, dst_sg
, len
);
445 if (ses_ptr
->cdata
.init
!= 0) {
446 ret
= cryptodev_cipher_decrypt(&ses_ptr
->cdata
,
447 src_sg
, dst_sg
, len
);
453 if (ses_ptr
->hdata
.init
!= 0) {
454 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
462 dprintk(0, KERN_ERR
, "CryptoAPI failure: %d\n", ret
);
467 /* This is the main crypto function - feed it with plaintext
468 and get a ciphertext (or vice versa :-) */
470 __crypto_run_std(struct csession
*ses_ptr
, struct crypt_op
*cop
)
473 char __user
*src
, *dst
;
474 struct scatterlist sg
;
475 size_t nbytes
, bufsize
;
479 data
= (char *)__get_free_page(GFP_KERNEL
);
484 bufsize
= PAGE_SIZE
< nbytes
? PAGE_SIZE
: nbytes
;
490 size_t current_len
= nbytes
> bufsize
? bufsize
: nbytes
;
492 if (unlikely(copy_from_user(data
, src
, current_len
))) {
497 sg_init_one(&sg
, data
, current_len
);
499 ret
= hash_n_crypt(ses_ptr
, cop
, &sg
, &sg
, current_len
);
504 if (ses_ptr
->cdata
.init
!= 0) {
505 if (unlikely(copy_to_user(dst
, data
, current_len
))) {
512 nbytes
-= current_len
;
516 free_page((unsigned long)data
);
/* Drop the references taken by get_user_pages() in __get_userbuf().
 * Pages are walked in reverse; any non-reserved page is marked dirty
 * first (the kernel may have written decrypted/encrypted data into it),
 * then its page-cache reference is released. */
520 void release_user_pages(struct page
**pg
, int pagecount
)
522 while (pagecount
--) {
523 if (!PageReserved(pg
[pagecount
]))
524 SetPageDirty(pg
[pagecount
]);
525 page_cache_release(pg
[pagecount
]);
529 /* offset of buf in its first page */
530 #define PAGEOFFSET(buf) ((unsigned long)buf & ~PAGE_MASK)
532 /* fetch the pages addr resides in into pg and initialise sg with them */
533 int __get_userbuf(uint8_t __user
*addr
, uint32_t len
, int write
,
534 int pgcount
, struct page
**pg
, struct scatterlist
*sg
,
535 struct task_struct
*task
, struct mm_struct
*mm
)
537 int ret
, pglen
, i
= 0;
538 struct scatterlist
*sgp
;
540 down_write(&mm
->mmap_sem
);
541 ret
= get_user_pages(task
, mm
,
542 (unsigned long)addr
, pgcount
, write
, 0, pg
, NULL
);
543 up_write(&mm
->mmap_sem
);
547 sg_init_table(sg
, pgcount
);
549 pglen
= min((ptrdiff_t)(PAGE_SIZE
- PAGEOFFSET(addr
)), (ptrdiff_t)len
);
550 sg_set_page(sg
, pg
[i
++], pglen
, PAGEOFFSET(addr
));
553 for (sgp
= sg_next(sg
); len
; sgp
= sg_next(sgp
)) {
554 pglen
= min((uint32_t)PAGE_SIZE
, len
);
555 sg_set_page(sgp
, pg
[i
++], pglen
, 0);
558 sg_mark_end(sg_last(sg
, pgcount
));
562 int adjust_sg_array(struct csession
* ses
, int pagecount
)
564 struct scatterlist
*sg
;
568 for (array_size
= ses
->array_size
; array_size
< pagecount
;
572 dprintk(2, KERN_DEBUG
, "%s: reallocating to %d elements\n",
573 __func__
, array_size
);
574 pages
= krealloc(ses
->pages
, array_size
* sizeof(struct page
*),
576 if (unlikely(!pages
))
579 sg
= krealloc(ses
->sg
, array_size
* sizeof(struct scatterlist
),
584 ses
->array_size
= array_size
;
589 /* make cop->src and cop->dst available in scatterlists */
590 static int get_userbuf(struct csession
*ses
, struct kernel_crypt_op
*kcop
,
591 struct scatterlist
**src_sg
, struct scatterlist
**dst_sg
,
594 int src_pagecount
, dst_pagecount
= 0, pagecount
, write_src
= 1;
595 struct crypt_op
*cop
= &kcop
->cop
;
598 if (cop
->src
== NULL
)
601 if (ses
->alignmask
&& !IS_ALIGNED((unsigned long)cop
->src
, ses
->alignmask
)) {
602 dprintk(2, KERN_WARNING
, "%s: careful - source address %lx is not %d byte aligned\n",
603 __func__
, (unsigned long)cop
->src
, ses
->alignmask
+ 1);
606 src_pagecount
= PAGECOUNT(cop
->src
, cop
->len
);
607 if (!ses
->cdata
.init
) { /* hashing only */
609 } else if (cop
->src
!= cop
->dst
) { /* non-in-situ transformation */
610 if (cop
->dst
== NULL
)
613 dst_pagecount
= PAGECOUNT(cop
->dst
, cop
->len
);
616 if (ses
->alignmask
&& !IS_ALIGNED((unsigned long)cop
->dst
, ses
->alignmask
)) {
617 dprintk(2, KERN_WARNING
, "%s: careful - destination address %lx is not %d byte aligned\n",
618 __func__
, (unsigned long)cop
->dst
, ses
->alignmask
+ 1);
622 (*tot_pages
) = pagecount
= src_pagecount
+ dst_pagecount
;
624 if (pagecount
> ses
->array_size
) {
625 rc
= adjust_sg_array(ses
, pagecount
);
630 rc
= __get_userbuf(cop
->src
, cop
->len
, write_src
, src_pagecount
,
631 ses
->pages
, ses
->sg
, kcop
->task
, kcop
->mm
);
634 "failed to get user pages for data input\n");
637 (*src_sg
) = (*dst_sg
) = ses
->sg
;
642 (*dst_sg
) = ses
->sg
+ src_pagecount
;
644 rc
= __get_userbuf(cop
->dst
, cop
->len
, 1, dst_pagecount
,
645 ses
->pages
+ src_pagecount
, *dst_sg
,
646 kcop
->task
, kcop
->mm
);
649 "failed to get user pages for data output\n");
650 release_user_pages(ses
->pages
, src_pagecount
);
656 /* This is the main crypto function - zero-copy edition */
658 __crypto_run_zc(struct csession
*ses_ptr
, struct kernel_crypt_op
*kcop
)
660 struct scatterlist
*src_sg
, *dst_sg
;
661 struct crypt_op
*cop
= &kcop
->cop
;
662 int ret
= 0, pagecount
;
664 ret
= get_userbuf(ses_ptr
, kcop
, &src_sg
, &dst_sg
, &pagecount
);
666 dprintk(1, KERN_ERR
, "Error getting user pages. "
667 "Falling back to non zero copy.\n");
668 return __crypto_run_std(ses_ptr
, cop
);
671 ret
= hash_n_crypt(ses_ptr
, cop
, src_sg
, dst_sg
, cop
->len
);
673 release_user_pages(ses_ptr
->pages
, pagecount
);
677 static int crypto_run(struct fcrypt
*fcr
, struct kernel_crypt_op
*kcop
)
679 struct csession
*ses_ptr
;
680 struct crypt_op
*cop
= &kcop
->cop
;
683 if (unlikely(cop
->op
!= COP_ENCRYPT
&& cop
->op
!= COP_DECRYPT
)) {
684 dprintk(1, KERN_DEBUG
, "invalid operation op=%u\n", cop
->op
);
688 /* this also enters ses_ptr->sem */
689 ses_ptr
= crypto_get_session_by_sid(fcr
, cop
->ses
);
690 if (unlikely(!ses_ptr
)) {
691 dprintk(1, KERN_ERR
, "invalid session ID=0x%08X\n", cop
->ses
);
695 if (ses_ptr
->hdata
.init
!= 0 && !(cop
->flags
& (COP_FLAG_UPDATE
| COP_FLAG_FINAL
))) {
696 ret
= cryptodev_hash_reset(&ses_ptr
->hdata
);
699 "error in cryptodev_hash_reset()\n");
704 if (ses_ptr
->cdata
.init
!= 0) {
705 int blocksize
= ses_ptr
->cdata
.blocksize
;
707 if (unlikely(cop
->len
% blocksize
)) {
709 "data size (%u) isn't a multiple "
710 "of block size (%u)\n",
711 cop
->len
, blocksize
);
716 cryptodev_cipher_set_iv(&ses_ptr
->cdata
, kcop
->iv
,
717 min(ses_ptr
->cdata
.ivsize
, kcop
->ivlen
));
720 if (likely(cop
->len
)) {
721 if (cop
->flags
& COP_FLAG_NO_ZC
)
722 ret
= __crypto_run_std(ses_ptr
, &kcop
->cop
);
724 ret
= __crypto_run_zc(ses_ptr
, kcop
);
729 if (ses_ptr
->cdata
.init
!= 0) {
730 cryptodev_cipher_get_iv(&ses_ptr
->cdata
, kcop
->iv
,
731 min(ses_ptr
->cdata
.ivsize
, kcop
->ivlen
));
734 if (ses_ptr
->hdata
.init
!= 0 &&
735 ((cop
->flags
& COP_FLAG_FINAL
) ||
736 (!(cop
->flags
& COP_FLAG_UPDATE
) || cop
->len
== 0))) {
738 ret
= cryptodev_hash_final(&ses_ptr
->hdata
, kcop
->hash_output
);
740 dprintk(0, KERN_ERR
, "CryptoAPI failure: %d\n", ret
);
743 kcop
->digestsize
= ses_ptr
->hdata
.digestsize
;
746 #if defined(CRYPTODEV_STATS)
748 /* this is safe - we check cop->op at the function entry */
749 ses_ptr
->stat
[cop
->op
] += cop
->len
;
750 if (ses_ptr
->stat_max_size
< cop
->len
)
751 ses_ptr
->stat_max_size
= cop
->len
;
752 ses_ptr
->stat_count
++;
757 crypto_put_session(ses_ptr
);
761 static void cryptask_routine(struct work_struct
*work
)
763 struct crypt_priv
*pcr
= container_of(work
, struct crypt_priv
, cryptask
);
764 struct todo_list_item
*item
;
767 /* fetch all pending jobs into the temporary list */
768 mutex_lock(&pcr
->todo
.lock
);
769 list_cut_position(&tmp
, &pcr
->todo
.list
, pcr
->todo
.list
.prev
);
770 mutex_unlock(&pcr
->todo
.lock
);
772 /* handle each job locklessly */
773 list_for_each_entry(item
, &tmp
, __hook
) {
774 item
->result
= crypto_run(&pcr
->fcrypt
, &item
->kcop
);
775 if (unlikely(item
->result
))
776 dprintk(0, KERN_ERR
, "%s: crypto_run() failed: %d\n",
777 __func__
, item
->result
);
780 /* push all handled jobs to the done list at once */
781 mutex_lock(&pcr
->done
.lock
);
782 list_splice_tail(&tmp
, &pcr
->done
.list
);
783 mutex_unlock(&pcr
->done
.lock
);
785 /* wake for POLLIN */
786 wake_up_interruptible(&pcr
->user_waiter
);
789 /* ====== /dev/crypto ====== */
792 cryptodev_open(struct inode
*inode
, struct file
*filp
)
794 struct todo_list_item
*tmp
;
795 struct crypt_priv
*pcr
;
798 pcr
= kmalloc(sizeof(*pcr
), GFP_KERNEL
);
802 memset(pcr
, 0, sizeof(*pcr
));
803 mutex_init(&pcr
->fcrypt
.sem
);
804 INIT_LIST_HEAD(&pcr
->fcrypt
.list
);
806 INIT_LIST_HEAD(&pcr
->free
.list
);
807 INIT_LIST_HEAD(&pcr
->todo
.list
);
808 INIT_LIST_HEAD(&pcr
->done
.list
);
809 INIT_WORK(&pcr
->cryptask
, cryptask_routine
);
810 mutex_init(&pcr
->free
.lock
);
811 mutex_init(&pcr
->todo
.lock
);
812 mutex_init(&pcr
->done
.lock
);
813 init_waitqueue_head(&pcr
->user_waiter
);
815 for (i
= 0; i
< DEF_COP_RINGSIZE
; i
++) {
816 tmp
= kzalloc(sizeof(struct todo_list_item
), GFP_KERNEL
);
818 dprintk(2, KERN_DEBUG
, "%s: allocated new item at %lx\n",
819 __func__
, (unsigned long)tmp
);
820 list_add(&tmp
->__hook
, &pcr
->free
.list
);
823 filp
->private_data
= pcr
;
824 dprintk(2, KERN_DEBUG
,
825 "Cryptodev handle initialised, %d elements in queue\n",
831 cryptodev_release(struct inode
*inode
, struct file
*filp
)
833 struct crypt_priv
*pcr
= filp
->private_data
;
834 struct todo_list_item
*item
, *item_safe
;
840 cancel_work_sync(&pcr
->cryptask
);
842 mutex_destroy(&pcr
->todo
.lock
);
843 mutex_destroy(&pcr
->done
.lock
);
844 mutex_destroy(&pcr
->free
.lock
);
846 list_splice_tail(&pcr
->todo
.list
, &pcr
->free
.list
);
847 list_splice_tail(&pcr
->done
.list
, &pcr
->free
.list
);
849 list_for_each_entry_safe(item
, item_safe
, &pcr
->free
.list
, __hook
) {
850 dprintk(2, KERN_DEBUG
, "%s: freeing item at %lx\n",
851 __func__
, (unsigned long)item
);
852 list_del(&item
->__hook
);
857 if (items_freed
!= pcr
->itemcount
) {
859 "%s: freed %d items, but %d should exist!\n",
860 __func__
, items_freed
, pcr
->itemcount
);
863 crypto_finish_all_sessions(&pcr
->fcrypt
);
865 filp
->private_data
= NULL
;
867 dprintk(2, KERN_DEBUG
,
868 "Cryptodev handle deinitialised, %d elements freed\n",
/* Duplicate an open cryptodev handle: allocate an unused fd for the
 * current process and install filp into it (used by CIOCFSESSION-style
 * fd cloning from the ioctl path).
 * NOTE(review): the extraction is missing lines 875-876 and 878-879 —
 * upstream guards fd_install() with "if (ret >= 0)" and takes a file
 * reference via get_file(filp) first; without get_file() the file's
 * refcount would be wrong. Verify against the complete file. */
874 clonefd(struct file
*filp
)
877 ret
= get_unused_fd();
880 fd_install(ret
, filp
);
886 /* enqueue a job for asynchronous completion
889 * -EBUSY when there are no free queue slots left
890 * (and the number of slots has reached its MAX_COP_RINGSIZE)
891 * -EFAULT when there was a memory allocation error
893 static int crypto_async_run(struct crypt_priv
*pcr
, struct kernel_crypt_op
*kcop
)
895 struct todo_list_item
*item
= NULL
;
897 mutex_lock(&pcr
->free
.lock
);
898 if (likely(!list_empty(&pcr
->free
.list
))) {
899 item
= list_first_entry(&pcr
->free
.list
,
900 struct todo_list_item
, __hook
);
901 list_del(&item
->__hook
);
902 } else if (pcr
->itemcount
< MAX_COP_RINGSIZE
) {
905 mutex_unlock(&pcr
->free
.lock
);
908 mutex_unlock(&pcr
->free
.lock
);
910 if (unlikely(!item
)) {
911 item
= kzalloc(sizeof(struct todo_list_item
), GFP_KERNEL
);
914 dprintk(1, KERN_INFO
, "%s: increased item count to %d\n",
915 __func__
, pcr
->itemcount
);
918 memcpy(&item
->kcop
, kcop
, sizeof(struct kernel_crypt_op
));
920 mutex_lock(&pcr
->todo
.lock
);
921 list_add_tail(&item
->__hook
, &pcr
->todo
.list
);
922 mutex_unlock(&pcr
->todo
.lock
);
924 queue_work(cryptodev_wq
, &pcr
->cryptask
);
928 /* get the first completed job from the "done" queue
931 * -EBUSY if no completed jobs are ready (yet)
932 * the return value of crypto_run() otherwise */
933 static int crypto_async_fetch(struct crypt_priv
*pcr
,
934 struct kernel_crypt_op
*kcop
)
936 struct todo_list_item
*item
;
939 mutex_lock(&pcr
->done
.lock
);
940 if (list_empty(&pcr
->done
.list
)) {
941 mutex_unlock(&pcr
->done
.lock
);
944 item
= list_first_entry(&pcr
->done
.list
, struct todo_list_item
, __hook
);
945 list_del(&item
->__hook
);
946 mutex_unlock(&pcr
->done
.lock
);
948 memcpy(kcop
, &item
->kcop
, sizeof(struct kernel_crypt_op
));
949 retval
= item
->result
;
951 mutex_lock(&pcr
->free
.lock
);
952 list_add_tail(&item
->__hook
, &pcr
->free
.list
);
953 mutex_unlock(&pcr
->free
.lock
);
955 /* wake for POLLOUT */
956 wake_up_interruptible(&pcr
->user_waiter
);
961 /* this function has to be called from process context */
962 static int fill_kcop_from_cop(struct kernel_crypt_op
*kcop
, struct fcrypt
*fcr
)
964 struct crypt_op
*cop
= &kcop
->cop
;
965 struct csession
*ses_ptr
;
968 /* this also enters ses_ptr->sem */
969 ses_ptr
= crypto_get_session_by_sid(fcr
, cop
->ses
);
970 if (unlikely(!ses_ptr
)) {
971 dprintk(1, KERN_ERR
, "invalid session ID=0x%08X\n", cop
->ses
);
974 kcop
->ivlen
= cop
->iv
? ses_ptr
->cdata
.ivsize
: 0;
975 kcop
->digestsize
= 0; /* will be updated during operation */
977 crypto_put_session(ses_ptr
);
979 kcop
->task
= current
;
980 kcop
->mm
= current
->mm
;
983 rc
= copy_from_user(kcop
->iv
, cop
->iv
, kcop
->ivlen
);
986 "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
987 kcop
->ivlen
, rc
, (unsigned long)cop
->iv
);
995 /* this function has to be called from process context */
996 static int fill_cop_from_kcop(struct kernel_crypt_op
*kcop
, struct fcrypt
*fcr
)
1000 if (kcop
->digestsize
) {
1001 ret
= copy_to_user(kcop
->cop
.mac
,
1002 kcop
->hash_output
, kcop
->digestsize
);
1006 if (kcop
->ivlen
&& kcop
->cop
.flags
& COP_FLAG_WRITE_IV
) {
1007 ret
= copy_to_user(kcop
->cop
.iv
,
1008 kcop
->iv
, kcop
->ivlen
);
1015 static int kcop_from_user(struct kernel_crypt_op
*kcop
,
1016 struct fcrypt
*fcr
, void __user
*arg
)
1018 if (unlikely(copy_from_user(&kcop
->cop
, arg
, sizeof(kcop
->cop
))))
1021 return fill_kcop_from_cop(kcop
, fcr
);
1024 static int kcop_to_user(struct kernel_crypt_op
*kcop
,
1025 struct fcrypt
*fcr
, void __user
*arg
)
1029 ret
= fill_cop_from_kcop(kcop
, fcr
);
1033 if (unlikely(copy_to_user(arg
, &kcop
->cop
, sizeof(kcop
->cop
))))
1038 static inline void tfm_info_to_alg_info(struct alg_info
*dst
, struct crypto_tfm
*tfm
)
1040 snprintf(dst
->cra_name
, CRYPTODEV_MAX_ALG_NAME
,
1041 "%s", crypto_tfm_alg_name(tfm
));
1042 snprintf(dst
->cra_driver_name
, CRYPTODEV_MAX_ALG_NAME
,
1043 "%s", crypto_tfm_alg_driver_name(tfm
));
1046 static int get_session_info(struct fcrypt
*fcr
, struct session_info_op
*siop
)
1048 struct csession
*ses_ptr
;
1050 /* this also enters ses_ptr->sem */
1051 ses_ptr
= crypto_get_session_by_sid(fcr
, siop
->ses
);
1052 if (unlikely(!ses_ptr
)) {
1053 dprintk(1, KERN_ERR
, "invalid session ID=0x%08X\n", siop
->ses
);
1057 if (ses_ptr
->cdata
.init
) {
1058 tfm_info_to_alg_info(&siop
->cipher_info
,
1059 crypto_ablkcipher_tfm(ses_ptr
->cdata
.async
.s
));
1061 if (ses_ptr
->hdata
.init
) {
1062 tfm_info_to_alg_info(&siop
->hash_info
,
1063 crypto_ahash_tfm(ses_ptr
->hdata
.async
.s
));
1066 siop
->alignmask
= ses_ptr
->alignmask
;
1068 crypto_put_session(ses_ptr
);
1073 cryptodev_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg_
)
1075 void __user
*arg
= (void __user
*)arg_
;
1076 int __user
*p
= arg
;
1077 struct session_op sop
;
1078 struct kernel_crypt_op kcop
;
1079 struct kernel_crypt_auth_op kcaop
;
1080 struct crypt_priv
*pcr
= filp
->private_data
;
1082 struct session_info_op siop
;
1093 return put_user(0, p
);
1096 ret
= put_user(fd
, p
);
1097 if (unlikely(ret
)) {
1103 if (unlikely(copy_from_user(&sop
, arg
, sizeof(sop
))))
1106 ret
= crypto_create_session(fcr
, &sop
);
1109 ret
= copy_to_user(arg
, &sop
, sizeof(sop
));
1110 if (unlikely(ret
)) {
1111 crypto_finish_session(fcr
, sop
.ses
);
1116 ret
= get_user(ses
, (uint32_t __user
*)arg
);
1119 ret
= crypto_finish_session(fcr
, ses
);
1122 if (unlikely(copy_from_user(&siop
, arg
, sizeof(siop
))))
1125 ret
= get_session_info(fcr
, &siop
);
1128 return copy_to_user(arg
, &siop
, sizeof(siop
));
1130 if (unlikely(ret
= kcop_from_user(&kcop
, fcr
, arg
))) {
1131 dprintk(1, KERN_WARNING
, "Error copying from user");
1135 ret
= crypto_run(fcr
, &kcop
);
1136 if (unlikely(ret
)) {
1137 dprintk(1, KERN_WARNING
, "Error in crypto_run");
1141 return kcop_to_user(&kcop
, fcr
, arg
);
1143 if (unlikely(ret
= kcaop_from_user(&kcaop
, fcr
, arg
))) {
1144 dprintk(1, KERN_WARNING
, "Error copying from user");
1148 ret
= crypto_auth_run(fcr
, &kcaop
);
1149 if (unlikely(ret
)) {
1150 dprintk(1, KERN_WARNING
, "Error in crypto_auth_run");
1153 return kcaop_to_user(&kcaop
, fcr
, arg
);
1154 case CIOCASYNCCRYPT
:
1155 if (unlikely(ret
= kcop_from_user(&kcop
, fcr
, arg
)))
1158 return crypto_async_run(pcr
, &kcop
);
1159 case CIOCASYNCFETCH
:
1160 ret
= crypto_async_fetch(pcr
, &kcop
);
1164 return kcop_to_user(&kcop
, fcr
, arg
);
1170 /* compatibility code for 32bit userlands */
1171 #ifdef CONFIG_COMPAT
1174 compat_to_session_op(struct compat_session_op
*compat
, struct session_op
*sop
)
1176 sop
->cipher
= compat
->cipher
;
1177 sop
->mac
= compat
->mac
;
1178 sop
->keylen
= compat
->keylen
;
1180 sop
->key
= compat_ptr(compat
->key
);
1181 sop
->mackeylen
= compat
->mackeylen
;
1182 sop
->mackey
= compat_ptr(compat
->mackey
);
1183 sop
->ses
= compat
->ses
;
1187 session_op_to_compat(struct session_op
*sop
, struct compat_session_op
*compat
)
1189 compat
->cipher
= sop
->cipher
;
1190 compat
->mac
= sop
->mac
;
1191 compat
->keylen
= sop
->keylen
;
1193 compat
->key
= ptr_to_compat(sop
->key
);
1194 compat
->mackeylen
= sop
->mackeylen
;
1195 compat
->mackey
= ptr_to_compat(sop
->mackey
);
1196 compat
->ses
= sop
->ses
;
1200 compat_to_crypt_op(struct compat_crypt_op
*compat
, struct crypt_op
*cop
)
1202 cop
->ses
= compat
->ses
;
1203 cop
->op
= compat
->op
;
1204 cop
->flags
= compat
->flags
;
1205 cop
->len
= compat
->len
;
1207 cop
->src
= compat_ptr(compat
->src
);
1208 cop
->dst
= compat_ptr(compat
->dst
);
1209 cop
->mac
= compat_ptr(compat
->mac
);
1210 cop
->iv
= compat_ptr(compat
->iv
);
1214 crypt_op_to_compat(struct crypt_op
*cop
, struct compat_crypt_op
*compat
)
1216 compat
->ses
= cop
->ses
;
1217 compat
->op
= cop
->op
;
1218 compat
->flags
= cop
->flags
;
1219 compat
->len
= cop
->len
;
1221 compat
->src
= ptr_to_compat(cop
->src
);
1222 compat
->dst
= ptr_to_compat(cop
->dst
);
1223 compat
->mac
= ptr_to_compat(cop
->mac
);
1224 compat
->iv
= ptr_to_compat(cop
->iv
);
1227 static int compat_kcop_from_user(struct kernel_crypt_op
*kcop
,
1228 struct fcrypt
*fcr
, void __user
*arg
)
1230 struct compat_crypt_op compat_cop
;
1232 if (unlikely(copy_from_user(&compat_cop
, arg
, sizeof(compat_cop
))))
1234 compat_to_crypt_op(&compat_cop
, &kcop
->cop
);
1236 return fill_kcop_from_cop(kcop
, fcr
);
1239 static int compat_kcop_to_user(struct kernel_crypt_op
*kcop
,
1240 struct fcrypt
*fcr
, void __user
*arg
)
1243 struct compat_crypt_op compat_cop
;
1245 ret
= fill_cop_from_kcop(kcop
, fcr
);
1246 if (unlikely(ret
)) {
1247 dprintk(1, KERN_WARNING
, "Error in fill_cop_from_kcop");
1250 crypt_op_to_compat(&kcop
->cop
, &compat_cop
);
1252 if (unlikely(copy_to_user(arg
, &compat_cop
, sizeof(compat_cop
)))) {
1253 dprintk(1, KERN_WARNING
, "Error copying to user");
1260 cryptodev_compat_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg_
)
1262 void __user
*arg
= (void __user
*)arg_
;
1263 struct crypt_priv
*pcr
= file
->private_data
;
1265 struct session_op sop
;
1266 struct compat_session_op compat_sop
;
1267 struct kernel_crypt_op kcop
;
1280 return cryptodev_ioctl(file
, cmd
, arg_
);
1282 case COMPAT_CIOCGSESSION
:
1283 if (unlikely(copy_from_user(&compat_sop
, arg
,
1284 sizeof(compat_sop
))))
1286 compat_to_session_op(&compat_sop
, &sop
);
1288 ret
= crypto_create_session(fcr
, &sop
);
1292 session_op_to_compat(&sop
, &compat_sop
);
1293 ret
= copy_to_user(arg
, &compat_sop
, sizeof(compat_sop
));
1294 if (unlikely(ret
)) {
1295 crypto_finish_session(fcr
, sop
.ses
);
1300 case COMPAT_CIOCCRYPT
:
1301 ret
= compat_kcop_from_user(&kcop
, fcr
, arg
);
1305 ret
= crypto_run(fcr
, &kcop
);
1309 return compat_kcop_to_user(&kcop
, fcr
, arg
);
1310 case COMPAT_CIOCASYNCCRYPT
:
1311 if (unlikely(ret
= compat_kcop_from_user(&kcop
, fcr
, arg
)))
1314 return crypto_async_run(pcr
, &kcop
);
1315 case COMPAT_CIOCASYNCFETCH
:
1316 ret
= crypto_async_fetch(pcr
, &kcop
);
1320 return compat_kcop_to_user(&kcop
, fcr
, arg
);
1327 #endif /* CONFIG_COMPAT */
/* poll()/select() support for async operation: readable when completed
 * jobs sit on the done list, writable when a free slot exists or the
 * ring may still grow (itemcount < MAX_COP_RINGSIZE). Sleeps on
 * pcr->user_waiter, which cryptask_routine()/crypto_async_fetch() wake.
 * NOTE(review): the "int ret = 0;" declaration and the final
 * "return ret;" are missing from this extraction (embedded numbering
 * jumps 1331->1334, ends at 1339) — verify against the complete file. */
1329 static unsigned int cryptodev_poll(struct file
*file
, poll_table
*wait
)
1331 struct crypt_priv
*pcr
= file
->private_data
;
1334 poll_wait(file
, &pcr
->user_waiter
, wait
);
1336 if (!list_empty_careful(&pcr
->done
.list
))
1337 ret
|= POLLIN
| POLLRDNORM
;
1338 if (!list_empty_careful(&pcr
->free
.list
) || pcr
->itemcount
< MAX_COP_RINGSIZE
)
1339 ret
|= POLLOUT
| POLLWRNORM
;
/* File operations for /dev/crypto; compat_ioctl is only wired up when
 * the kernel supports 32-bit userlands (CONFIG_COMPAT). */
1344 static const struct file_operations cryptodev_fops
= {
1345 .owner
= THIS_MODULE
,
1346 .open
= cryptodev_open
,
1347 .release
= cryptodev_release
,
1348 .unlocked_ioctl
= cryptodev_ioctl
,
1349 #ifdef CONFIG_COMPAT
1350 .compat_ioctl
= cryptodev_compat_ioctl
,
1351 #endif /* CONFIG_COMPAT */
1352 .poll
= cryptodev_poll
,
/* Misc device registered as /dev/crypto with a dynamically chosen minor.
 * NOTE(review): embedded line 1357 (presumably '.name = "crypto",') and
 * both closing "};" lines are missing from this extraction — verify
 * against the complete file. */
1355 static struct miscdevice cryptodev
= {
1356 .minor
= MISC_DYNAMIC_MINOR
,
1358 .fops
= &cryptodev_fops
,
/* Register the /dev/crypto misc device; logs on failure.
 * NOTE(review): the return-type line, 'rc' declaration, the if(rc)
 * guard around the error printk and the return statements are missing
 * from this extraction — verify against the complete file. */
1362 cryptodev_register(void)
1366 rc
= misc_register(&cryptodev
);
1368 printk(KERN_ERR PFX
"registration of /dev/crypto failed\n");
/* Unregister the /dev/crypto misc device (inverse of cryptodev_register). */
1376 cryptodev_deregister(void)
1378 misc_deregister(&cryptodev
);
1381 /* ====== Module init/exit ====== */
/* Module entry point: create the dedicated workqueue first, then
 * register the device; on registration failure the workqueue is torn
 * down again before bailing out.
 * NOTE(review): the 'rc' declaration, the error-path returns and the
 * final "return rc;"/closing brace are missing from this extraction —
 * verify against the complete file. */
1382 static int __init
init_cryptodev(void)
1386 cryptodev_wq
= create_workqueue("cryptodev_queue");
1387 if (unlikely(!cryptodev_wq
)) {
1388 printk(KERN_ERR PFX
"failed to allocate the cryptodev workqueue\n");
1392 rc
= cryptodev_register();
1394 destroy_workqueue(cryptodev_wq
);
1398 printk(KERN_INFO PFX
"driver %s loaded.\n", VERSION
);
/* Module exit: drain and destroy the workqueue before deregistering the
 * device, so no queued crypto job can run against a vanishing device. */
1403 static void __exit
exit_cryptodev(void)
1405 flush_workqueue(cryptodev_wq
);
1406 destroy_workqueue(cryptodev_wq
);
1408 cryptodev_deregister();
1409 printk(KERN_INFO PFX
"driver unloaded.\n");
/* Wire up the module entry/exit points. */
1412 module_init(init_cryptodev
);
1413 module_exit(exit_cryptodev
);