1 /*
2 * Driver for /dev/crypto device (aka CryptoDev)
4 * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
5 * Copyright (c) 2009,2010 Nikos Mavrogiannopoulos <nmav@gnutls.org>
7 * This file is part of linux cryptodev.
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc.,
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
26 * Device /dev/crypto provides an interface for
27 * accessing kernel CryptoAPI algorithms (ciphers,
28 * hashes) from userspace programs.
30 * /dev/crypto interface was originally introduced in
31 * OpenBSD and this module attempts to keep the API.
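 *
 * Illustrative synchronous usage sketch from userspace (not part of the
 * driver; it assumes the exported <crypto/cryptodev.h> header, a 16-byte
 * AES key and caller-provided buffers, with error handling omitted):
 *
 *	int cfd = open("/dev/crypto", O_RDWR);
 *
 *	struct session_op sop = {
 *		.cipher = CRYPTO_AES_CBC,
 *		.key    = key,
 *		.keylen = 16,
 *	};
 *	ioctl(cfd, CIOCGSESSION, &sop);
 *
 *	struct crypt_op cop = {
 *		.ses = sop.ses,
 *		.op  = COP_ENCRYPT,
 *		.len = data_len,	(must be a multiple of the block size)
 *		.src = plaintext,
 *		.dst = ciphertext,
 *		.iv  = iv,
 *	};
 *	ioctl(cfd, CIOCCRYPT, &cop);
 *
 *	ioctl(cfd, CIOCFSESSION, &sop.ses);
 *	close(cfd);
 */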
35 #include <linux/crypto.h>
36 #include <linux/mm.h>
37 #include <linux/highmem.h>
38 #include <linux/ioctl.h>
39 #include <linux/random.h>
40 #include <linux/syscalls.h>
41 #include <linux/pagemap.h>
42 #include <linux/poll.h>
43 #include <linux/uaccess.h>
44 #include "cryptodev.h"
45 #include <linux/scatterlist.h>
46 #include "cryptodev_int.h"
47 #include "version.h"
49 MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
50 MODULE_DESCRIPTION("CryptoDev driver");
51 MODULE_LICENSE("GPL");
53 /* ====== Compile-time config ====== */
55 #define CRYPTODEV_STATS
57 /* Default (pre-allocated) and maximum size of the job queue.
58 * The count covers free, pending and done items together. */
59 #define DEF_COP_RINGSIZE 16
60 #define MAX_COP_RINGSIZE 64
62 /* ====== Module parameters ====== */
64 int cryptodev_verbosity;
65 module_param(cryptodev_verbosity, int, 0644);
66 MODULE_PARM_DESC(cryptodev_verbosity, "0: normal, 1: verbose, 2: debug");
68 #ifdef CRYPTODEV_STATS
69 static int enable_stats;
70 module_param(enable_stats, int, 0644);
71 MODULE_PARM_DESC(enable_stats, "collect statistics about cryptodev usage");
72 #endif
74 /* ====== CryptoAPI ====== */
75 struct fcrypt {
76 struct list_head list;
77 struct mutex sem;
80 struct todo_list_item {
81 struct list_head __hook;
82 struct kernel_crypt_op kcop;
83 int result;
86 struct locked_list {
87 struct list_head list;
88 struct mutex lock;
91 struct crypt_priv {
92 struct fcrypt fcrypt;
93 struct locked_list free, todo, done;
94 int itemcount;
95 struct work_struct cryptask;
96 wait_queue_head_t user_waiter;
99 #define FILL_SG(sg, ptr, len) \
100 do { \
101 (sg)->page = virt_to_page(ptr); \
102 (sg)->offset = offset_in_page(ptr); \
103 (sg)->length = len; \
104 (sg)->dma_address = 0; \
105 } while (0)
107 struct csession {
108 struct list_head entry;
109 struct mutex sem;
110 struct cipher_data cdata;
111 struct hash_data hdata;
112 uint32_t sid;
113 #ifdef CRYPTODEV_STATS
114 #if !((COP_ENCRYPT < 2) && (COP_DECRYPT < 2))
115 #error Struct csession.stat uses COP_{ENCRYPT,DECRYPT} as indices. Do something!
116 #endif
117 unsigned long long stat[2];
118 size_t stat_max_size, stat_count;
119 #endif
120 int array_size;
121 struct page **pages;
122 struct scatterlist *sg;
125 /* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
126 static struct workqueue_struct *cryptodev_wq;
128 /* Prepare session for future use. */
129 static int
130 crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
132 struct csession *ses_new = NULL, *ses_ptr;
133 int ret = 0;
134 const char *alg_name = NULL;
135 const char *hash_name = NULL;
136 int hmac_mode = 1;
138 /* Does the request make sense? */
139 if (unlikely(!sop->cipher && !sop->mac)) {
140 dprintk(1, KERN_DEBUG, "Both 'cipher' and 'mac' unset.\n");
141 return -EINVAL;
144 switch (sop->cipher) {
145 case 0:
146 break;
147 case CRYPTO_DES_CBC:
148 alg_name = "cbc(des)";
149 break;
150 case CRYPTO_3DES_CBC:
151 alg_name = "cbc(des3_ede)";
152 break;
153 case CRYPTO_BLF_CBC:
154 alg_name = "cbc(blowfish)";
155 break;
156 case CRYPTO_AES_CBC:
157 alg_name = "cbc(aes)";
158 break;
159 case CRYPTO_CAMELLIA_CBC:
160 alg_name = "cbc(camelia)";
161 break;
162 case CRYPTO_AES_CTR:
163 alg_name = "ctr(aes)";
164 break;
165 case CRYPTO_NULL:
166 alg_name = "ecb(cipher_null)";
167 break;
168 default:
169 dprintk(1, KERN_DEBUG, "%s: bad cipher: %d\n", __func__,
170 sop->cipher);
171 return -EINVAL;
174 switch (sop->mac) {
175 case 0:
176 break;
177 case CRYPTO_MD5_HMAC:
178 hash_name = "hmac(md5)";
179 break;
180 case CRYPTO_RIPEMD160_HMAC:
181 hash_name = "hmac(rmd160)";
182 break;
183 case CRYPTO_SHA1_HMAC:
184 hash_name = "hmac(sha1)";
185 break;
186 case CRYPTO_SHA2_256_HMAC:
187 hash_name = "hmac(sha256)";
188 break;
189 case CRYPTO_SHA2_384_HMAC:
190 hash_name = "hmac(sha384)";
191 break;
192 case CRYPTO_SHA2_512_HMAC:
193 hash_name = "hmac(sha512)";
194 break;
196 /* non-hmac cases */
197 case CRYPTO_MD5:
198 hash_name = "md5";
199 hmac_mode = 0;
200 break;
201 case CRYPTO_RIPEMD160:
202 hash_name = "rmd160";
203 hmac_mode = 0;
204 break;
205 case CRYPTO_SHA1:
206 hash_name = "sha1";
207 hmac_mode = 0;
208 break;
209 case CRYPTO_SHA2_256:
210 hash_name = "sha256";
211 hmac_mode = 0;
212 break;
213 case CRYPTO_SHA2_384:
214 hash_name = "sha384";
215 hmac_mode = 0;
216 break;
217 case CRYPTO_SHA2_512:
218 hash_name = "sha512";
219 hmac_mode = 0;
220 break;
222 default:
223 dprintk(1, KERN_DEBUG, "%s: bad mac: %d\n", __func__,
224 sop->mac);
225 return -EINVAL;
228 /* Create a session and add it to the list. */
229 ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
230 if (!ses_new)
231 return -ENOMEM;
233 /* Set-up crypto transform. */
234 if (alg_name) {
235 uint8_t keyp[CRYPTO_CIPHER_MAX_KEY_LEN];
237 if (unlikely(sop->keylen > CRYPTO_CIPHER_MAX_KEY_LEN)) {
238 dprintk(1, KERN_DEBUG,
239 "Setting key failed for %s-%zu.\n",
240 alg_name, (size_t)sop->keylen*8);
241 ret = -EINVAL;
242 goto error_cipher;
245 if (unlikely(copy_from_user(keyp, sop->key, sop->keylen))) {
246 ret = -EFAULT;
247 goto error_cipher;
250 ret = cryptodev_cipher_init(&ses_new->cdata, alg_name, keyp,
251 sop->keylen);
252 if (ret < 0) {
253 dprintk(1, KERN_DEBUG,
254 "%s: Failed to load cipher for %s\n",
255 __func__, alg_name);
256 ret = -EINVAL;
257 goto error_cipher;
261 if (hash_name) {
262 uint8_t keyp[CRYPTO_HMAC_MAX_KEY_LEN];
264 if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN)) {
265 dprintk(1, KERN_DEBUG,
266 "Setting key failed for %s-%zu.\n",
267 hash_name, (size_t)sop->mackeylen*8);
268 ret = -EINVAL;
269 goto error_hash;
272 if (unlikely(copy_from_user(keyp, sop->mackey,
273 sop->mackeylen))) {
274 ret = -EFAULT;
275 goto error_hash;
278 ret = cryptodev_hash_init(&ses_new->hdata, hash_name, hmac_mode,
279 keyp, sop->mackeylen);
280 if (ret != 0) {
281 dprintk(1, KERN_DEBUG,
282 "%s: Failed to load hash for %s\n",
283 __func__, hash_name);
284 ret = -EINVAL;
285 goto error_hash;
289 ses_new->array_size = DEFAULT_PREALLOC_PAGES;
290 dprintk(2, KERN_DEBUG, "%s: preallocating for %d user pages\n",
291 __func__, ses_new->array_size);
292 ses_new->pages = kzalloc(ses_new->array_size *
293 sizeof(struct page *), GFP_KERNEL);
294 ses_new->sg = kzalloc(ses_new->array_size *
295 sizeof(struct scatterlist), GFP_KERNEL);
296 if (ses_new->sg == NULL || ses_new->pages == NULL) {
297 dprintk(0, KERN_DEBUG, "Memory error\n");
298 ret = -ENOMEM;
299 goto error_hash;
302 /* add the new session to the list */
303 get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
304 mutex_init(&ses_new->sem);
306 mutex_lock(&fcr->sem);
307 restart:
308 list_for_each_entry(ses_ptr, &fcr->list, entry) {
309 /* Check for duplicate SID */
310 if (unlikely(ses_new->sid == ses_ptr->sid)) {
311 get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
312 /* Unless we have a broken RNG this
313 shouldn't loop forever... ;-) */
314 goto restart;
318 list_add(&ses_new->entry, &fcr->list);
319 mutex_unlock(&fcr->sem);
321 /* Fill in some values for the user. */
322 sop->ses = ses_new->sid;
324 return 0;
326 error_hash:
327 cryptodev_cipher_deinit(&ses_new->cdata);
328 kfree(ses_new->sg);
329 kfree(ses_new->pages);
330 error_cipher:
331 kfree(ses_new);
333 return ret;
337 /* Everything that needs to be done when removing a session. */
338 static inline void
339 crypto_destroy_session(struct csession *ses_ptr)
341 if (!mutex_trylock(&ses_ptr->sem)) {
342 dprintk(2, KERN_DEBUG, "Waiting for semaphore of sid=0x%08X\n",
343 ses_ptr->sid);
344 mutex_lock(&ses_ptr->sem);
346 dprintk(2, KERN_DEBUG, "Removed session 0x%08X\n", ses_ptr->sid);
347 #if defined(CRYPTODEV_STATS)
348 if (enable_stats)
349 dprintk(2, KERN_DEBUG,
350 "Usage in Bytes: enc=%llu, dec=%llu, \
351 max=%zu, avg=%lu, cnt=%zu\n",
352 ses_ptr->stat[COP_ENCRYPT], ses_ptr->stat[COP_DECRYPT],
353 ses_ptr->stat_max_size, ses_ptr->stat_count > 0
354 ? ((unsigned long)(ses_ptr->stat[COP_ENCRYPT]+
355 ses_ptr->stat[COP_DECRYPT]) /
356 ses_ptr->stat_count) : 0,
357 ses_ptr->stat_count);
358 #endif
359 cryptodev_cipher_deinit(&ses_ptr->cdata);
360 cryptodev_hash_deinit(&ses_ptr->hdata);
361 dprintk(2, KERN_DEBUG, "%s: freeing space for %d user pages\n",
362 __func__, ses_ptr->array_size);
363 kfree(ses_ptr->pages);
364 kfree(ses_ptr->sg);
365 mutex_unlock(&ses_ptr->sem);
366 kfree(ses_ptr);
369 /* Look up a session by ID and remove it. */
370 static int
371 crypto_finish_session(struct fcrypt *fcr, uint32_t sid)
373 struct csession *tmp, *ses_ptr;
374 struct list_head *head;
375 int ret = 0;
377 mutex_lock(&fcr->sem);
378 head = &fcr->list;
379 list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
380 if (ses_ptr->sid == sid) {
381 list_del(&ses_ptr->entry);
382 crypto_destroy_session(ses_ptr);
383 break;
387 if (unlikely(!ses_ptr)) {
388 dprintk(1, KERN_ERR, "Session with sid=0x%08X not found!\n",
389 sid);
390 ret = -ENOENT;
392 mutex_unlock(&fcr->sem);
394 return ret;
397 /* Remove all sessions when closing the file */
398 static int
399 crypto_finish_all_sessions(struct fcrypt *fcr)
401 struct csession *tmp, *ses_ptr;
402 struct list_head *head;
404 mutex_lock(&fcr->sem);
406 head = &fcr->list;
407 list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
408 list_del(&ses_ptr->entry);
409 crypto_destroy_session(ses_ptr);
411 mutex_unlock(&fcr->sem);
413 return 0;
416 /* Look up session by session ID. The returned session is locked. */
417 static struct csession *
418 crypto_get_session_by_sid(struct fcrypt *fcr, uint32_t sid)
420 struct csession *ses_ptr;
422 mutex_lock(&fcr->sem);
423 list_for_each_entry(ses_ptr, &fcr->list, entry) {
424 if (ses_ptr->sid == sid) {
425 mutex_lock(&ses_ptr->sem);
426 break;
429 mutex_unlock(&fcr->sem);
431 return ses_ptr;
434 static int
435 hash_n_crypt(struct csession *ses_ptr, struct crypt_op *cop,
436 struct scatterlist *src_sg, struct scatterlist *dst_sg,
437 uint32_t len)
439 int ret;
441 /* Always hash before encryption and after decryption. Maybe
442 * we should introduce a flag to switch... TBD later on.
444 if (cop->op == COP_ENCRYPT) {
445 if (ses_ptr->hdata.init != 0) {
446 ret = cryptodev_hash_update(&ses_ptr->hdata,
447 src_sg, len);
448 if (unlikely(ret))
449 goto out_err;
451 if (ses_ptr->cdata.init != 0) {
452 ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
453 src_sg, dst_sg, len);
455 if (unlikely(ret))
456 goto out_err;
458 } else {
459 if (ses_ptr->cdata.init != 0) {
460 ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
461 src_sg, dst_sg, len);
463 if (unlikely(ret))
464 goto out_err;
467 if (ses_ptr->hdata.init != 0) {
468 ret = cryptodev_hash_update(&ses_ptr->hdata,
469 dst_sg, len);
470 if (unlikely(ret))
471 goto out_err;
474 return 0;
475 out_err:
476 dprintk(0, KERN_ERR, "CryptoAPI failure: %d\n", ret);
477 return ret;
481 /* This is the main crypto function - feed it with plaintext
482 and get a ciphertext (or vice versa :-) */
483 static int
484 __crypto_run_std(struct csession *ses_ptr, struct crypt_op *cop)
486 char *data;
487 char __user *src, *dst;
488 struct scatterlist sg;
489 size_t nbytes, bufsize;
490 int ret = 0;
492 nbytes = cop->len;
493 data = (char *)__get_free_page(GFP_KERNEL);
495 if (unlikely(!data))
496 return -ENOMEM;
498 bufsize = PAGE_SIZE < nbytes ? PAGE_SIZE : nbytes;
500 src = cop->src;
501 dst = cop->dst;
503 while (nbytes > 0) {
504 size_t current_len = nbytes > bufsize ? bufsize : nbytes;
506 if (unlikely(copy_from_user(data, src, current_len))) {
507 ret = -EFAULT;
508 break;
511 sg_init_one(&sg, data, current_len);
513 ret = hash_n_crypt(ses_ptr, cop, &sg, &sg, current_len);
515 if (unlikely(ret))
516 break;
518 if (ses_ptr->cdata.init != 0) {
519 if (unlikely(copy_to_user(dst, data, current_len))) {
520 ret = -EFAULT;
521 break;
525 dst += current_len;
526 nbytes -= current_len;
527 src += current_len;
530 free_page((unsigned long)data);
531 return ret;
534 void release_user_pages(struct page **pg, int pagecount)
536 while (pagecount--) {
537 if (!PageReserved(pg[pagecount]))
538 SetPageDirty(pg[pagecount]);
539 page_cache_release(pg[pagecount]);
543 /* offset of buf in its first page */
544 #define PAGEOFFSET(buf) ((unsigned long)buf & ~PAGE_MASK)
546 /* fetch the pages that addr resides in into pg and initialise sg with them */
547 int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
548 int pgcount, struct page **pg, struct scatterlist *sg,
549 struct task_struct *task, struct mm_struct *mm)
551 int ret, pglen, i = 0;
552 struct scatterlist *sgp;
554 down_write(&mm->mmap_sem);
555 ret = get_user_pages(task, mm,
556 (unsigned long)addr, pgcount, write, 0, pg, NULL);
557 up_write(&mm->mmap_sem);
558 if (ret != pgcount)
559 return -EINVAL;
561 sg_init_table(sg, pgcount);
563 pglen = min((ptrdiff_t)(PAGE_SIZE - PAGEOFFSET(addr)), (ptrdiff_t)len);
564 sg_set_page(sg, pg[i++], pglen, PAGEOFFSET(addr));
566 len -= pglen;
567 for (sgp = sg_next(sg); len; sgp = sg_next(sgp)) {
568 pglen = min((uint32_t)PAGE_SIZE, len);
569 sg_set_page(sgp, pg[i++], pglen, 0);
570 len -= pglen;
572 sg_mark_end(sg_last(sg, pgcount));
573 return 0;
576 /* make cop->src and cop->dst available in scatterlists */
577 static int get_userbuf(struct csession *ses, struct kernel_crypt_op *kcop,
578 struct scatterlist **src_sg, struct scatterlist **dst_sg,
579 int *tot_pages)
581 int src_pagecount, dst_pagecount = 0, pagecount, write_src = 1;
582 struct crypt_op *cop = &kcop->cop;
583 int rc;
585 if (cop->src == NULL)
586 return -EINVAL;
588 src_pagecount = PAGECOUNT(cop->src, cop->len);
589 if (!ses->cdata.init) { /* hashing only */
590 write_src = 0;
591 } else if (cop->src != cop->dst) { /* non-in-situ transformation */
592 if (cop->dst == NULL)
593 return -EINVAL;
595 dst_pagecount = PAGECOUNT(cop->dst, cop->len);
596 write_src = 0;
598 (*tot_pages) = pagecount = src_pagecount + dst_pagecount;
600 if (pagecount > ses->array_size) {
601 struct scatterlist *sg;
602 struct page **pages;
603 int array_size;
605 for (array_size = ses->array_size; array_size < pagecount;
606 array_size *= 2)
609 dprintk(2, KERN_DEBUG, "%s: reallocating to %d elements\n",
610 __func__, array_size);
611 pages = krealloc(ses->pages, array_size * sizeof(struct page *),
612 GFP_KERNEL);
613 if (unlikely(!pages))
614 return -ENOMEM;
615 ses->pages = pages;
616 sg = krealloc(ses->sg, array_size * sizeof(struct scatterlist),
617 GFP_KERNEL);
618 if (unlikely(!sg))
619 return -ENOMEM;
620 ses->sg = sg;
621 ses->array_size = array_size;
624 rc = __get_userbuf(cop->src, cop->len, write_src, src_pagecount,
625 ses->pages, ses->sg, kcop->task, kcop->mm);
626 if (unlikely(rc)) {
627 dprintk(1, KERN_ERR,
628 "failed to get user pages for data input\n");
629 return -EINVAL;
631 (*src_sg) = (*dst_sg) = ses->sg;
633 if (!dst_pagecount)
634 return 0;
636 (*dst_sg) = ses->sg + src_pagecount;
638 rc = __get_userbuf(cop->dst, cop->len, 1, dst_pagecount,
639 ses->pages + src_pagecount, *dst_sg,
640 kcop->task, kcop->mm);
641 if (unlikely(rc)) {
642 dprintk(1, KERN_ERR,
643 "failed to get user pages for data output\n");
644 release_user_pages(ses->pages, src_pagecount);
645 return -EINVAL;
647 return 0;
650 /* This is the main crypto function - zero-copy edition */
651 static int
652 __crypto_run_zc(struct csession *ses_ptr, struct kernel_crypt_op *kcop)
654 struct scatterlist *src_sg, *dst_sg;
655 struct crypt_op *cop = &kcop->cop;
656 int ret = 0, pagecount;
658 ret = get_userbuf(ses_ptr, kcop, &src_sg, &dst_sg, &pagecount);
659 if (unlikely(ret)) {
660 dprintk(1, KERN_ERR, "Error getting user pages. \
661 Falling back to non zero copy.\n");
662 return __crypto_run_std(ses_ptr, cop);
665 ret = hash_n_crypt(ses_ptr, cop, src_sg, dst_sg, cop->len);
667 release_user_pages(ses_ptr->pages, pagecount);
668 return ret;
671 static int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop)
673 struct csession *ses_ptr;
674 struct crypt_op *cop = &kcop->cop;
675 int ret;
677 if (unlikely(cop->op != COP_ENCRYPT && cop->op != COP_DECRYPT)) {
678 dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", cop->op);
679 return -EINVAL;
682 /* this also enters ses_ptr->sem */
683 ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
684 if (unlikely(!ses_ptr)) {
685 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
686 return -EINVAL;
689 if (ses_ptr->hdata.init != 0 && !(cop->flags & COP_FLAG_UPDATE) &&
690 !(cop->flags & COP_FLAG_FINAL)) {
691 ret = cryptodev_hash_reset(&ses_ptr->hdata);
692 if (unlikely(ret)) {
693 dprintk(1, KERN_ERR,
694 "error in cryptodev_hash_reset()\n");
695 goto out_unlock;
699 if (ses_ptr->cdata.init != 0) {
700 int blocksize = ses_ptr->cdata.blocksize;
702 if (unlikely(cop->len % blocksize)) {
703 dprintk(1, KERN_ERR,
704 "data size (%u) isn't a multiple \
705 of block size (%u)\n",
706 cop->len, blocksize);
707 ret = -EINVAL;
708 goto out_unlock;
711 cryptodev_cipher_set_iv(&ses_ptr->cdata, kcop->iv,
712 min(ses_ptr->cdata.ivsize, kcop->ivlen));
715 if (cop->len != 0) {
716 ret = __crypto_run_zc(ses_ptr, kcop);
717 if (unlikely(ret))
718 goto out_unlock;
721 if (ses_ptr->hdata.init != 0 &&
722 ((cop->flags & COP_FLAG_FINAL) ||
723 (!(cop->flags & COP_FLAG_UPDATE) || cop->len == 0))) {
725 ret = cryptodev_hash_final(&ses_ptr->hdata, kcop->hash_output);
726 if (unlikely(ret)) {
727 dprintk(0, KERN_ERR, "CryptoAPI failure: %d\n", ret);
728 goto out_unlock;
732 #if defined(CRYPTODEV_STATS)
733 if (enable_stats) {
734 /* this is safe - we check cop->op at the function entry */
735 ses_ptr->stat[cop->op] += cop->len;
736 if (ses_ptr->stat_max_size < cop->len)
737 ses_ptr->stat_max_size = cop->len;
738 ses_ptr->stat_count++;
740 #endif
742 out_unlock:
743 mutex_unlock(&ses_ptr->sem);
744 return ret;
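/* Illustrative userspace sketch for the COP_FLAG_UPDATE/COP_FLAG_FINAL path
 * handled above (not part of the driver; a hash-only HMAC-SHA1 session over
 * several buffers, error handling omitted; 'next_chunk', 'chunk' and
 * 'digest' are placeholders):
 *
 *	struct session_op sop = {
 *		.mac       = CRYPTO_SHA1_HMAC,
 *		.mackey    = hmac_key,
 *		.mackeylen = hmac_keylen,
 *	};
 *	ioctl(cfd, CIOCGSESSION, &sop);
 *
 *	struct crypt_op cop = { .ses = sop.ses, .op = COP_ENCRYPT };
 *
 *	while (next_chunk(&chunk, &chunk_len)) {
 *		cop.flags = COP_FLAG_UPDATE;
 *		cop.src   = chunk;
 *		cop.len   = chunk_len;
 *		ioctl(cfd, CIOCCRYPT, &cop);
 *	}
 *
 *	cop.flags = COP_FLAG_FINAL;
 *	cop.len   = 0;
 *	cop.mac   = digest;		(20 bytes for HMAC-SHA1)
 *	ioctl(cfd, CIOCCRYPT, &cop);
 *
 *	ioctl(cfd, CIOCFSESSION, &sop.ses);
 */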
747 static void cryptask_routine(struct work_struct *work)
749 struct crypt_priv *pcr = container_of(work, struct crypt_priv, cryptask);
750 struct todo_list_item *item;
751 LIST_HEAD(tmp);
753 /* fetch all pending jobs into the temporary list */
754 mutex_lock(&pcr->todo.lock);
755 list_cut_position(&tmp, &pcr->todo.list, pcr->todo.list.prev);
756 mutex_unlock(&pcr->todo.lock);
758 /* handle each job locklessly */
759 list_for_each_entry(item, &tmp, __hook) {
760 item->result = crypto_run(&pcr->fcrypt, &item->kcop);
761 if (unlikely(item->result))
762 dprintk(0, KERN_ERR, "%s: crypto_run() failed: %d\n",
763 __func__, item->result);
766 /* push all handled jobs to the done list at once */
767 mutex_lock(&pcr->done.lock);
768 list_splice_tail(&tmp, &pcr->done.list);
769 mutex_unlock(&pcr->done.lock);
771 /* wake for POLLIN */
772 wake_up_interruptible(&pcr->user_waiter);
775 /* ====== /dev/crypto ====== */
777 static int
778 cryptodev_open(struct inode *inode, struct file *filp)
780 struct todo_list_item *tmp;
781 struct crypt_priv *pcr;
782 int i;
784 pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
785 if (!pcr)
786 return -ENOMEM;
789 mutex_init(&pcr->fcrypt.sem);
790 INIT_LIST_HEAD(&pcr->fcrypt.list);
792 INIT_LIST_HEAD(&pcr->free.list);
793 INIT_LIST_HEAD(&pcr->todo.list);
794 INIT_LIST_HEAD(&pcr->done.list);
795 INIT_WORK(&pcr->cryptask, cryptask_routine);
796 mutex_init(&pcr->free.lock);
797 mutex_init(&pcr->todo.lock);
798 mutex_init(&pcr->done.lock);
799 init_waitqueue_head(&pcr->user_waiter);
801 for (i = 0; i < DEF_COP_RINGSIZE; i++) {
802 tmp = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
803 pcr->itemcount++;
804 dprintk(2, KERN_DEBUG, "%s: allocated new item at %lx\n",
805 __func__, (unsigned long)tmp);
806 list_add(&tmp->__hook, &pcr->free.list);
809 filp->private_data = pcr;
810 dprintk(2, KERN_DEBUG,
811 "Cryptodev handle initialised, %d elements in queue\n",
812 DEF_COP_RINGSIZE);
813 return 0;
816 static int
817 cryptodev_release(struct inode *inode, struct file *filp)
819 struct crypt_priv *pcr = filp->private_data;
820 struct todo_list_item *item, *item_safe;
821 int items_freed = 0;
823 if (!pcr)
824 return 0;
826 cancel_work_sync(&pcr->cryptask);
828 mutex_destroy(&pcr->todo.lock);
829 mutex_destroy(&pcr->done.lock);
830 mutex_destroy(&pcr->free.lock);
832 list_splice_tail(&pcr->todo.list, &pcr->free.list);
833 list_splice_tail(&pcr->done.list, &pcr->free.list);
835 list_for_each_entry_safe(item, item_safe, &pcr->free.list, __hook) {
836 dprintk(2, KERN_DEBUG, "%s: freeing item at %lx\n",
837 __func__, (unsigned long)item);
838 list_del(&item->__hook);
839 kfree(item);
840 items_freed++;
843 if (items_freed != pcr->itemcount) {
844 dprintk(0, KERN_ERR,
845 "%s: freed %d items, but %d should exist!\n",
846 __func__, items_freed, pcr->itemcount);
849 crypto_finish_all_sessions(&pcr->fcrypt);
850 kfree(pcr);
851 filp->private_data = NULL;
853 dprintk(2, KERN_DEBUG,
854 "Cryptodev handle deinitialised, %d elements freed\n",
855 items_freed);
856 return 0;
859 static int
860 clonefd(struct file *filp)
862 int ret;
863 ret = get_unused_fd();
864 if (ret >= 0) {
865 get_file(filp);
866 fd_install(ret, filp);
869 return ret;
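/* Illustrative CRIOGET usage from userspace (sketch, not part of the driver):
 * the ioctl installs a fresh file descriptor referring to the same
 * /dev/crypto instance (and thus the same sessions) and stores it in the
 * user-supplied int:
 *
 *	int new_cfd;
 *	ioctl(cfd, CRIOGET, &new_cfd);
 */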
872 /* enqueue a job for asynchronous completion
874 * returns:
875 * -EBUSY when there are no free queue slots left
876 * (and the number of slots has reached MAX_COP_RINGSIZE)
877 * -EFAULT when there was a memory allocation error
878 * 0 on success */
879 static int crypto_async_run(struct crypt_priv *pcr, struct kernel_crypt_op *kcop)
881 struct todo_list_item *item = NULL;
883 mutex_lock(&pcr->free.lock);
884 if (likely(!list_empty(&pcr->free.list))) {
885 item = list_first_entry(&pcr->free.list,
886 struct todo_list_item, __hook);
887 list_del(&item->__hook);
888 } else if (pcr->itemcount < MAX_COP_RINGSIZE) {
889 pcr->itemcount++;
890 } else {
891 mutex_unlock(&pcr->free.lock);
892 return -EBUSY;
894 mutex_unlock(&pcr->free.lock);
896 if (unlikely(!item)) {
897 item = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
898 if (unlikely(!item))
899 return -EFAULT;
900 dprintk(1, KERN_INFO, "%s: increased item count to %d\n",
901 __func__, pcr->itemcount);
904 memcpy(&item->kcop, kcop, sizeof(struct kernel_crypt_op));
906 mutex_lock(&pcr->todo.lock);
907 list_add_tail(&item->__hook, &pcr->todo.list);
908 mutex_unlock(&pcr->todo.lock);
910 queue_work(cryptodev_wq, &pcr->cryptask);
911 return 0;
914 /* get the first completed job from the "done" queue
916 * returns:
917 * -EBUSY if no completed jobs are ready (yet)
918 * the return value of crypto_run() otherwise */
919 static int crypto_async_fetch(struct crypt_priv *pcr,
920 struct kernel_crypt_op *kcop)
922 struct todo_list_item *item;
923 int retval;
925 mutex_lock(&pcr->done.lock);
926 if (list_empty(&pcr->done.list)) {
927 mutex_unlock(&pcr->done.lock);
928 return -EBUSY;
930 item = list_first_entry(&pcr->done.list, struct todo_list_item, __hook);
931 list_del(&item->__hook);
932 mutex_unlock(&pcr->done.lock);
934 memcpy(kcop, &item->kcop, sizeof(struct kernel_crypt_op));
935 retval = item->result;
937 mutex_lock(&pcr->free.lock);
938 list_add_tail(&item->__hook, &pcr->free.list);
939 mutex_unlock(&pcr->free.lock);
941 /* wake for POLLOUT */
942 wake_up_interruptible(&pcr->user_waiter);
944 return retval;
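/* Illustrative asynchronous usage sketch from userspace (not part of the
 * driver; 'cfd' is an open /dev/crypto descriptor and 'cop' a filled-in
 * struct crypt_op as in the synchronous case; error handling omitted):
 *
 *	ioctl(cfd, CIOCASYNCCRYPT, &cop);	enqueues and returns at once
 *
 *	struct pollfd pfd = { .fd = cfd, .events = POLLIN };
 *	poll(&pfd, 1, -1);			wait until a job has completed
 *
 *	ioctl(cfd, CIOCASYNCFETCH, &cop);	collects the finished op;
 *						fails with EBUSY if nothing
 *						has completed yet
 */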
947 /* this function has to be called from process context */
948 static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
950 struct crypt_op *cop = &kcop->cop;
951 struct csession *ses_ptr;
952 int rc;
954 /* this also enters ses_ptr->sem */
955 ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
956 if (unlikely(!ses_ptr)) {
957 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
958 return -EINVAL;
960 kcop->ivlen = cop->iv ? ses_ptr->cdata.ivsize : 0;
961 kcop->digestsize = ses_ptr->hdata.digestsize;
962 mutex_unlock(&ses_ptr->sem);
964 kcop->task = current;
965 kcop->mm = current->mm;
967 rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
968 if (likely(!rc))
969 return 0;
971 dprintk(1, KERN_ERR,
972 "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
973 kcop->ivlen, rc, (unsigned long)cop->iv);
974 return -EFAULT;
977 static int kcop_from_user(struct kernel_crypt_op *kcop,
978 struct fcrypt *fcr, void __user *arg)
980 if (unlikely(copy_from_user(&kcop->cop, arg, sizeof(kcop->cop))))
981 return -EFAULT;
983 return fill_kcop_from_cop(kcop, fcr);
986 static long
987 cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
989 void __user *arg = (void __user *)arg_;
990 int __user *p = arg;
991 struct session_op sop;
992 struct kernel_crypt_op kcop;
993 struct crypt_priv *pcr = filp->private_data;
994 struct fcrypt *fcr;
995 uint32_t ses;
996 int ret, fd;
998 if (unlikely(!pcr))
999 BUG();
1001 fcr = &pcr->fcrypt;
1003 switch (cmd) {
1004 case CIOCASYMFEAT:
1005 return put_user(0, p);
1006 case CRIOGET:
1007 fd = clonefd(filp);
1008 ret = put_user(fd, p);
1009 if (unlikely(ret)) {
1010 sys_close(fd);
1011 return ret;
1013 return ret;
1014 case CIOCGSESSION:
1015 if (unlikely(copy_from_user(&sop, arg, sizeof(sop))))
1016 return -EFAULT;
1018 ret = crypto_create_session(fcr, &sop);
1019 if (unlikely(ret))
1020 return ret;
1021 ret = copy_to_user(arg, &sop, sizeof(sop));
1022 if (unlikely(ret)) {
1023 crypto_finish_session(fcr, sop.ses);
1024 return -EFAULT;
1026 return ret;
1027 case CIOCFSESSION:
1028 ret = get_user(ses, (uint32_t __user *)arg);
1029 if (unlikely(ret))
1030 return ret;
1031 ret = crypto_finish_session(fcr, ses);
1032 return ret;
1033 case CIOCCRYPT:
1034 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
1035 return ret;
1037 ret = crypto_run(fcr, &kcop);
1038 if (unlikely(ret))
1039 return ret;
1040 ret = copy_to_user(kcop.cop.mac,
1041 kcop.hash_output, kcop.digestsize);
1042 if (unlikely(ret))
1043 return ret;
1044 if (unlikely(copy_to_user(arg, &kcop.cop, sizeof(kcop.cop))))
1045 return -EFAULT;
1046 return 0;
1047 case CIOCASYNCCRYPT:
1048 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
1049 return ret;
1051 return crypto_async_run(pcr, &kcop);
1052 case CIOCASYNCFETCH:
1053 ret = crypto_async_fetch(pcr, &kcop);
1054 if (unlikely(ret))
1055 return ret;
1057 ret = copy_to_user(kcop.cop.mac,
1058 kcop.hash_output, kcop.digestsize);
1059 if (unlikely(ret))
1060 return ret;
1062 return copy_to_user(arg, &kcop.cop, sizeof(kcop.cop));
1064 default:
1065 return -EINVAL;
1069 /* compatibility code for 32bit userlands */
1070 #ifdef CONFIG_COMPAT
1072 static inline void
1073 compat_to_session_op(struct compat_session_op *compat, struct session_op *sop)
1075 sop->cipher = compat->cipher;
1076 sop->mac = compat->mac;
1077 sop->keylen = compat->keylen;
1079 sop->key = compat_ptr(compat->key);
1080 sop->mackeylen = compat->mackeylen;
1081 sop->mackey = compat_ptr(compat->mackey);
1082 sop->ses = compat->ses;
1085 static inline void
1086 session_op_to_compat(struct session_op *sop, struct compat_session_op *compat)
1088 compat->cipher = sop->cipher;
1089 compat->mac = sop->mac;
1090 compat->keylen = sop->keylen;
1092 compat->key = ptr_to_compat(sop->key);
1093 compat->mackeylen = sop->mackeylen;
1094 compat->mackey = ptr_to_compat(sop->mackey);
1095 compat->ses = sop->ses;
1098 static inline void
1099 compat_to_crypt_op(struct compat_crypt_op *compat, struct crypt_op *cop)
1101 cop->ses = compat->ses;
1102 cop->op = compat->op;
1103 cop->flags = compat->flags;
1104 cop->len = compat->len;
1106 cop->src = compat_ptr(compat->src);
1107 cop->dst = compat_ptr(compat->dst);
1108 cop->mac = compat_ptr(compat->mac);
1109 cop->iv = compat_ptr(compat->iv);
1112 static inline void
1113 crypt_op_to_compat(struct crypt_op *cop, struct compat_crypt_op *compat)
1115 compat->ses = cop->ses;
1116 compat->op = cop->op;
1117 compat->flags = cop->flags;
1118 compat->len = cop->len;
1120 compat->src = ptr_to_compat(cop->src);
1121 compat->dst = ptr_to_compat(cop->dst);
1122 compat->mac = ptr_to_compat(cop->mac);
1123 compat->iv = ptr_to_compat(cop->iv);
1126 static int compat_kcop_from_user(struct kernel_crypt_op *kcop,
1127 struct fcrypt *fcr, void __user *arg)
1129 struct compat_crypt_op compat_cop;
1131 if (unlikely(copy_from_user(&compat_cop, arg, sizeof(compat_cop))))
1132 return -EFAULT;
1133 compat_to_crypt_op(&compat_cop, &kcop->cop);
1135 return fill_kcop_from_cop(kcop, fcr);
1138 static long
1139 cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
1141 void __user *arg = (void __user *)arg_;
1142 struct crypt_priv *pcr = file->private_data;
1143 struct fcrypt *fcr;
1144 struct session_op sop;
1145 struct compat_session_op compat_sop;
1146 struct kernel_crypt_op kcop;
1147 struct compat_crypt_op compat_cop;
1148 int ret;
1150 if (unlikely(!pcr))
1151 BUG();
1153 fcr = &pcr->fcrypt;
1155 switch (cmd) {
1156 case CIOCASYMFEAT:
1157 case CRIOGET:
1158 case CIOCFSESSION:
1159 return cryptodev_ioctl(file, cmd, arg_);
1161 case COMPAT_CIOCGSESSION:
1162 if (unlikely(copy_from_user(&compat_sop, arg,
1163 sizeof(compat_sop))))
1164 return -EFAULT;
1165 compat_to_session_op(&compat_sop, &sop);
1167 ret = crypto_create_session(fcr, &sop);
1168 if (unlikely(ret))
1169 return ret;
1171 session_op_to_compat(&sop, &compat_sop);
1172 ret = copy_to_user(arg, &compat_sop, sizeof(compat_sop));
1173 if (unlikely(ret)) {
1174 crypto_finish_session(fcr, sop.ses);
1175 return -EFAULT;
1177 return ret;
1179 case COMPAT_CIOCCRYPT:
1180 ret = compat_kcop_from_user(&kcop, fcr, arg);
1181 if (unlikely(ret))
1182 return ret;
1184 ret = crypto_run(fcr, &kcop);
1185 if (unlikely(ret))
1186 return ret;
1188 ret = copy_to_user(kcop.cop.mac,
1189 kcop.hash_output, kcop.digestsize);
1190 if (unlikely(ret))
1191 return ret;
1193 crypt_op_to_compat(&kcop.cop, &compat_cop);
1194 if (unlikely(copy_to_user(arg, &compat_cop,
1195 sizeof(compat_cop))))
1196 return -EFAULT;
1197 return 0;
1198 case COMPAT_CIOCASYNCCRYPT:
1199 if (unlikely(ret = compat_kcop_from_user(&kcop, fcr, arg)))
1200 return ret;
1202 return crypto_async_run(pcr, &kcop);
1203 case COMPAT_CIOCASYNCFETCH:
1204 ret = crypto_async_fetch(pcr, &kcop);
1205 if (unlikely(ret))
1206 return ret;
1208 ret = copy_to_user(kcop.cop.mac,
1209 kcop.hash_output, kcop.digestsize);
1210 if (unlikely(ret))
1211 return ret;
1213 crypt_op_to_compat(&kcop.cop, &compat_cop);
1214 return copy_to_user(arg, &compat_cop, sizeof(compat_cop));
1216 default:
1217 return -EINVAL;
1221 #endif /* CONFIG_COMPAT */
1223 static unsigned int cryptodev_poll(struct file *file, poll_table *wait)
1225 struct crypt_priv *pcr = file->private_data;
1226 int ret = 0;
1228 poll_wait(file, &pcr->user_waiter, wait);
1230 if (!list_empty_careful(&pcr->done.list))
1231 ret |= POLLIN | POLLRDNORM;
1232 if (!list_empty_careful(&pcr->free.list) || pcr->itemcount < MAX_COP_RINGSIZE)
1233 ret |= POLLOUT | POLLWRNORM;
1235 return ret;
1238 static const struct file_operations cryptodev_fops = {
1239 .owner = THIS_MODULE,
1240 .open = cryptodev_open,
1241 .release = cryptodev_release,
1242 .unlocked_ioctl = cryptodev_ioctl,
1243 #ifdef CONFIG_COMPAT
1244 .compat_ioctl = cryptodev_compat_ioctl,
1245 #endif /* CONFIG_COMPAT */
1246 .poll = cryptodev_poll,
1249 static struct miscdevice cryptodev = {
1250 .minor = MISC_DYNAMIC_MINOR,
1251 .name = "crypto",
1252 .fops = &cryptodev_fops,
1255 static int __init
1256 cryptodev_register(void)
1258 int rc;
1260 rc = misc_register(&cryptodev);
1261 if (unlikely(rc)) {
1262 printk(KERN_ERR PFX "registration of /dev/crypto failed\n");
1263 return rc;
1266 return 0;
1269 static void __exit
1270 cryptodev_deregister(void)
1272 misc_deregister(&cryptodev);
1275 /* ====== Module init/exit ====== */
1276 static int __init init_cryptodev(void)
1278 int rc;
1280 cryptodev_wq = create_workqueue("cryptodev_queue");
1281 if (unlikely(!cryptodev_wq)) {
1282 printk(KERN_ERR PFX "failed to allocate the cryptodev workqueue\n");
1283 return -EFAULT;
1286 rc = cryptodev_register();
1287 if (unlikely(rc)) {
1288 destroy_workqueue(cryptodev_wq);
1289 return rc;
1292 printk(KERN_INFO PFX "driver %s loaded.\n", VERSION);
1294 return 0;
1297 static void __exit exit_cryptodev(void)
1299 flush_workqueue(cryptodev_wq);
1300 destroy_workqueue(cryptodev_wq);
1302 cryptodev_deregister();
1303 printk(KERN_INFO PFX "driver unloaded.\n");
1306 module_init(init_cryptodev);
1307 module_exit(exit_cryptodev);