Allow encryption of SRTP records.
[cryptodev-linux.git] / cryptodev_main.c
blob0c97f0f79c15768299ee4342de300753b66a5f4b
1 /*
2 * Driver for /dev/crypto device (aka CryptoDev)
4 * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
5 * Copyright (c) 2009,2010 Nikos Mavrogiannopoulos <nmav@gnutls.org>
7 * This file is part of linux cryptodev.
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc.,
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
26 * Device /dev/crypto provides an interface for
27 * accessing kernel CryptoAPI algorithms (ciphers,
28 * hashes) from userspace programs.
30 * /dev/crypto interface was originally introduced in
31 * OpenBSD and this module attempts to keep the API.
35 #include <crypto/hash.h>
36 #include <linux/crypto.h>
37 #include <linux/mm.h>
38 #include <linux/highmem.h>
39 #include <linux/ioctl.h>
40 #include <linux/random.h>
41 #include <linux/syscalls.h>
42 #include <linux/pagemap.h>
43 #include <linux/poll.h>
44 #include <linux/uaccess.h>
45 #include <crypto/cryptodev.h>
46 #include <linux/scatterlist.h>
47 #include "cryptodev_int.h"
48 #include "version.h"
50 MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
51 MODULE_DESCRIPTION("CryptoDev driver");
52 MODULE_LICENSE("GPL");
54 /* ====== Compile-time config ====== */
56 /* Default (pre-allocated) and maximum size of the job queue.
57 * These are free, pending and done items all together. */
58 #define DEF_COP_RINGSIZE 16
59 #define MAX_COP_RINGSIZE 64
61 /* ====== Module parameters ====== */
63 int cryptodev_verbosity;
64 module_param(cryptodev_verbosity, int, 0644);
65 MODULE_PARM_DESC(cryptodev_verbosity, "0: normal, 1: verbose, 2: debug");
67 #ifdef CRYPTODEV_STATS
68 static int enable_stats;
69 module_param(enable_stats, int, 0644);
70 MODULE_PARM_DESC(enable_stats, "collect statictics about cryptodev usage");
71 #endif
73 /* ====== CryptoAPI ====== */
74 struct todo_list_item {
75 struct list_head __hook;
76 struct kernel_crypt_op kcop;
77 int result;
80 struct locked_list {
81 struct list_head list;
82 struct mutex lock;
85 struct crypt_priv {
86 struct fcrypt fcrypt;
87 struct locked_list free, todo, done;
88 int itemcount;
89 struct work_struct cryptask;
90 wait_queue_head_t user_waiter;
93 #define FILL_SG(sg, ptr, len) \
94 do { \
95 (sg)->page = virt_to_page(ptr); \
96 (sg)->offset = offset_in_page(ptr); \
97 (sg)->length = len; \
98 (sg)->dma_address = 0; \
99 } while (0)
101 /* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
102 static struct workqueue_struct *cryptodev_wq;
104 /* Prepare session for future use. */
105 static int
106 crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
108 struct csession *ses_new = NULL, *ses_ptr;
109 int ret = 0;
110 const char *alg_name = NULL;
111 const char *hash_name = NULL;
112 int hmac_mode = 1, stream = 0;
114 /* Does the request make sense? */
115 if (unlikely(!sop->cipher && !sop->mac)) {
116 dprintk(1, KERN_DEBUG, "Both 'cipher' and 'mac' unset.\n");
117 return -EINVAL;
120 switch (sop->cipher) {
121 case 0:
122 break;
123 case CRYPTO_DES_CBC:
124 alg_name = "cbc(des)";
125 break;
126 case CRYPTO_3DES_CBC:
127 alg_name = "cbc(des3_ede)";
128 break;
129 case CRYPTO_BLF_CBC:
130 alg_name = "cbc(blowfish)";
131 break;
132 case CRYPTO_AES_CBC:
133 alg_name = "cbc(aes)";
134 break;
135 case CRYPTO_AES_ECB:
136 alg_name = "ecb(aes)";
137 break;
138 case CRYPTO_CAMELLIA_CBC:
139 alg_name = "cbc(camelia)";
140 break;
141 case CRYPTO_AES_CTR:
142 alg_name = "ctr(aes)";
143 stream = 1;
144 break;
145 case CRYPTO_NULL:
146 alg_name = "ecb(cipher_null)";
147 stream = 1;
148 break;
149 default:
150 dprintk(1, KERN_DEBUG, "%s: bad cipher: %d\n", __func__,
151 sop->cipher);
152 return -EINVAL;
155 switch (sop->mac) {
156 case 0:
157 break;
158 case CRYPTO_MD5_HMAC:
159 hash_name = "hmac(md5)";
160 break;
161 case CRYPTO_RIPEMD160_HMAC:
162 hash_name = "hmac(rmd160)";
163 break;
164 case CRYPTO_SHA1_HMAC:
165 hash_name = "hmac(sha1)";
166 break;
167 case CRYPTO_SHA2_256_HMAC:
168 hash_name = "hmac(sha256)";
169 break;
170 case CRYPTO_SHA2_384_HMAC:
171 hash_name = "hmac(sha384)";
172 break;
173 case CRYPTO_SHA2_512_HMAC:
174 hash_name = "hmac(sha512)";
175 break;
177 /* non-hmac cases */
178 case CRYPTO_MD5:
179 hash_name = "md5";
180 hmac_mode = 0;
181 break;
182 case CRYPTO_RIPEMD160:
183 hash_name = "rmd160";
184 hmac_mode = 0;
185 break;
186 case CRYPTO_SHA1:
187 hash_name = "sha1";
188 hmac_mode = 0;
189 break;
190 case CRYPTO_SHA2_256:
191 hash_name = "sha256";
192 hmac_mode = 0;
193 break;
194 case CRYPTO_SHA2_384:
195 hash_name = "sha384";
196 hmac_mode = 0;
197 break;
198 case CRYPTO_SHA2_512:
199 hash_name = "sha512";
200 hmac_mode = 0;
201 break;
203 default:
204 dprintk(1, KERN_DEBUG, "%s: bad mac: %d\n", __func__,
205 sop->mac);
206 return -EINVAL;
209 /* Create a session and put it to the list. */
210 ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
211 if (!ses_new)
212 return -ENOMEM;
214 /* Set-up crypto transform. */
215 if (alg_name) {
216 uint8_t keyp[CRYPTO_CIPHER_MAX_KEY_LEN];
218 if (unlikely(sop->keylen > CRYPTO_CIPHER_MAX_KEY_LEN)) {
219 dprintk(1, KERN_DEBUG,
220 "Setting key failed for %s-%zu.\n",
221 alg_name, (size_t)sop->keylen*8);
222 ret = -EINVAL;
223 goto error_cipher;
226 if (unlikely(copy_from_user(keyp, sop->key, sop->keylen))) {
227 ret = -EFAULT;
228 goto error_cipher;
231 ret = cryptodev_cipher_init(&ses_new->cdata, alg_name, keyp,
232 sop->keylen, stream);
233 if (ret < 0) {
234 dprintk(1, KERN_DEBUG,
235 "%s: Failed to load cipher for %s\n",
236 __func__, alg_name);
237 ret = -EINVAL;
238 goto error_cipher;
242 if (hash_name) {
243 uint8_t keyp[CRYPTO_HMAC_MAX_KEY_LEN];
245 if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN)) {
246 dprintk(1, KERN_DEBUG,
247 "Setting key failed for %s-%zu.\n",
248 alg_name, (size_t)sop->mackeylen*8);
249 ret = -EINVAL;
250 goto error_hash;
253 if (sop->mackey && unlikely(copy_from_user(keyp, sop->mackey,
254 sop->mackeylen))) {
255 ret = -EFAULT;
256 goto error_hash;
259 ret = cryptodev_hash_init(&ses_new->hdata, hash_name, hmac_mode,
260 keyp, sop->mackeylen);
261 if (ret != 0) {
262 dprintk(1, KERN_DEBUG,
263 "%s: Failed to load hash for %s\n",
264 __func__, hash_name);
265 ret = -EINVAL;
266 goto error_hash;
270 ses_new->alignmask = max(ses_new->cdata.alignmask,
271 ses_new->hdata.alignmask);
272 dprintk(2, KERN_DEBUG, "%s: got alignmask %d\n", __func__, ses_new->alignmask);
274 ses_new->array_size = DEFAULT_PREALLOC_PAGES;
275 dprintk(2, KERN_DEBUG, "%s: preallocating for %d user pages\n",
276 __func__, ses_new->array_size);
277 ses_new->pages = kzalloc(ses_new->array_size *
278 sizeof(struct page *), GFP_KERNEL);
279 ses_new->sg = kzalloc(ses_new->array_size *
280 sizeof(struct scatterlist), GFP_KERNEL);
281 if (ses_new->sg == NULL || ses_new->pages == NULL) {
282 dprintk(0, KERN_DEBUG, "Memory error\n");
283 ret = -ENOMEM;
284 goto error_hash;
287 /* put the new session to the list */
288 get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
289 mutex_init(&ses_new->sem);
291 mutex_lock(&fcr->sem);
292 restart:
293 list_for_each_entry(ses_ptr, &fcr->list, entry) {
294 /* Check for duplicate SID */
295 if (unlikely(ses_new->sid == ses_ptr->sid)) {
296 get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
297 /* Unless we have a broken RNG this
298 shouldn't loop forever... ;-) */
299 goto restart;
303 list_add(&ses_new->entry, &fcr->list);
304 mutex_unlock(&fcr->sem);
306 /* Fill in some values for the user. */
307 sop->ses = ses_new->sid;
309 return 0;
311 error_hash:
312 cryptodev_cipher_deinit(&ses_new->cdata);
313 kfree(ses_new->sg);
314 kfree(ses_new->pages);
315 error_cipher:
316 kfree(ses_new);
318 return ret;
322 /* Everything that needs to be done when remowing a session. */
323 static inline void
324 crypto_destroy_session(struct csession *ses_ptr)
326 if (!mutex_trylock(&ses_ptr->sem)) {
327 dprintk(2, KERN_DEBUG, "Waiting for semaphore of sid=0x%08X\n",
328 ses_ptr->sid);
329 mutex_lock(&ses_ptr->sem);
331 dprintk(2, KERN_DEBUG, "Removed session 0x%08X\n", ses_ptr->sid);
332 #if defined(CRYPTODEV_STATS)
333 if (enable_stats)
334 dprintk(2, KERN_DEBUG,
335 "Usage in Bytes: enc=%llu, dec=%llu, "
336 "max=%zu, avg=%lu, cnt=%zu\n",
337 ses_ptr->stat[COP_ENCRYPT], ses_ptr->stat[COP_DECRYPT],
338 ses_ptr->stat_max_size, ses_ptr->stat_count > 0
339 ? ((unsigned long)(ses_ptr->stat[COP_ENCRYPT]+
340 ses_ptr->stat[COP_DECRYPT]) /
341 ses_ptr->stat_count) : 0,
342 ses_ptr->stat_count);
343 #endif
344 cryptodev_cipher_deinit(&ses_ptr->cdata);
345 cryptodev_hash_deinit(&ses_ptr->hdata);
346 dprintk(2, KERN_DEBUG, "%s: freeing space for %d user pages\n",
347 __func__, ses_ptr->array_size);
348 kfree(ses_ptr->pages);
349 kfree(ses_ptr->sg);
350 mutex_unlock(&ses_ptr->sem);
351 kfree(ses_ptr);
354 /* Look up a session by ID and remove. */
355 static int
356 crypto_finish_session(struct fcrypt *fcr, uint32_t sid)
358 struct csession *tmp, *ses_ptr;
359 struct list_head *head;
360 int ret = 0;
362 mutex_lock(&fcr->sem);
363 head = &fcr->list;
364 list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
365 if (ses_ptr->sid == sid) {
366 list_del(&ses_ptr->entry);
367 crypto_destroy_session(ses_ptr);
368 break;
372 if (unlikely(!ses_ptr)) {
373 dprintk(1, KERN_ERR, "Session with sid=0x%08X not found!\n",
374 sid);
375 ret = -ENOENT;
377 mutex_unlock(&fcr->sem);
379 return ret;
382 /* Remove all sessions when closing the file */
383 static int
384 crypto_finish_all_sessions(struct fcrypt *fcr)
386 struct csession *tmp, *ses_ptr;
387 struct list_head *head;
389 mutex_lock(&fcr->sem);
391 head = &fcr->list;
392 list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
393 list_del(&ses_ptr->entry);
394 crypto_destroy_session(ses_ptr);
396 mutex_unlock(&fcr->sem);
398 return 0;
401 /* Look up session by session ID. The returned session is locked. */
402 struct csession *
403 crypto_get_session_by_sid(struct fcrypt *fcr, uint32_t sid)
405 struct csession *ses_ptr, *retval = 0;
407 mutex_lock(&fcr->sem);
408 list_for_each_entry(ses_ptr, &fcr->list, entry) {
409 if (ses_ptr->sid == sid) {
410 mutex_lock(&ses_ptr->sem);
411 retval = ses_ptr;
412 break;
415 mutex_unlock(&fcr->sem);
417 return retval;
420 static int
421 hash_n_crypt(struct csession *ses_ptr, struct crypt_op *cop,
422 struct scatterlist *src_sg, struct scatterlist *dst_sg,
423 uint32_t len)
425 int ret;
427 /* Always hash before encryption and after decryption. Maybe
428 * we should introduce a flag to switch... TBD later on.
430 if (cop->op == COP_ENCRYPT) {
431 if (ses_ptr->hdata.init != 0) {
432 ret = cryptodev_hash_update(&ses_ptr->hdata,
433 src_sg, len);
434 if (unlikely(ret))
435 goto out_err;
437 if (ses_ptr->cdata.init != 0) {
438 ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
439 src_sg, dst_sg, len);
441 if (unlikely(ret))
442 goto out_err;
444 } else {
445 if (ses_ptr->cdata.init != 0) {
446 ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
447 src_sg, dst_sg, len);
449 if (unlikely(ret))
450 goto out_err;
453 if (ses_ptr->hdata.init != 0) {
454 ret = cryptodev_hash_update(&ses_ptr->hdata,
455 dst_sg, len);
456 if (unlikely(ret))
457 goto out_err;
460 return 0;
461 out_err:
462 dprintk(0, KERN_ERR, "CryptoAPI failure: %d\n", ret);
463 return ret;
467 /* This is the main crypto function - feed it with plaintext
468 and get a ciphertext (or vice versa :-) */
469 static int
470 __crypto_run_std(struct csession *ses_ptr, struct crypt_op *cop)
472 char *data;
473 char __user *src, *dst;
474 struct scatterlist sg;
475 size_t nbytes, bufsize;
476 int ret = 0;
478 nbytes = cop->len;
479 data = (char *)__get_free_page(GFP_KERNEL);
481 if (unlikely(!data))
482 return -ENOMEM;
484 bufsize = PAGE_SIZE < nbytes ? PAGE_SIZE : nbytes;
486 src = cop->src;
487 dst = cop->dst;
489 while (nbytes > 0) {
490 size_t current_len = nbytes > bufsize ? bufsize : nbytes;
492 if (unlikely(copy_from_user(data, src, current_len))) {
493 ret = -EFAULT;
494 break;
497 sg_init_one(&sg, data, current_len);
499 ret = hash_n_crypt(ses_ptr, cop, &sg, &sg, current_len);
501 if (unlikely(ret))
502 break;
504 if (ses_ptr->cdata.init != 0) {
505 if (unlikely(copy_to_user(dst, data, current_len))) {
506 ret = -EFAULT;
507 break;
511 dst += current_len;
512 nbytes -= current_len;
513 src += current_len;
516 free_page((unsigned long)data);
517 return ret;
520 void release_user_pages(struct page **pg, int pagecount)
522 while (pagecount--) {
523 if (!PageReserved(pg[pagecount]))
524 SetPageDirty(pg[pagecount]);
525 page_cache_release(pg[pagecount]);
529 /* offset of buf in it's first page */
530 #define PAGEOFFSET(buf) ((unsigned long)buf & ~PAGE_MASK)
532 /* fetch the pages addr resides in into pg and initialise sg with them */
533 int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
534 int pgcount, struct page **pg, struct scatterlist *sg,
535 struct task_struct *task, struct mm_struct *mm)
537 int ret, pglen, i = 0;
538 struct scatterlist *sgp;
540 down_write(&mm->mmap_sem);
541 ret = get_user_pages(task, mm,
542 (unsigned long)addr, pgcount, write, 0, pg, NULL);
543 up_write(&mm->mmap_sem);
544 if (ret != pgcount)
545 return -EINVAL;
547 sg_init_table(sg, pgcount);
549 pglen = min((ptrdiff_t)(PAGE_SIZE - PAGEOFFSET(addr)), (ptrdiff_t)len);
550 sg_set_page(sg, pg[i++], pglen, PAGEOFFSET(addr));
552 len -= pglen;
553 for (sgp = sg_next(sg); len; sgp = sg_next(sgp)) {
554 pglen = min((uint32_t)PAGE_SIZE, len);
555 sg_set_page(sgp, pg[i++], pglen, 0);
556 len -= pglen;
558 sg_mark_end(sg_last(sg, pgcount));
559 return 0;
562 int adjust_sg_array(struct csession * ses, int pagecount)
564 struct scatterlist *sg;
565 struct page **pages;
566 int array_size;
568 for (array_size = ses->array_size; array_size < pagecount;
569 array_size *= 2)
572 dprintk(2, KERN_DEBUG, "%s: reallocating to %d elements\n",
573 __func__, array_size);
574 pages = krealloc(ses->pages, array_size * sizeof(struct page *),
575 GFP_KERNEL);
576 if (unlikely(!pages))
577 return -ENOMEM;
578 ses->pages = pages;
579 sg = krealloc(ses->sg, array_size * sizeof(struct scatterlist),
580 GFP_KERNEL);
581 if (unlikely(!sg))
582 return -ENOMEM;
583 ses->sg = sg;
584 ses->array_size = array_size;
586 return 0;
589 /* make cop->src and cop->dst available in scatterlists */
590 static int get_userbuf(struct csession *ses, struct kernel_crypt_op *kcop,
591 struct scatterlist **src_sg, struct scatterlist **dst_sg,
592 int *tot_pages)
594 int src_pagecount, dst_pagecount = 0, pagecount, write_src = 1;
595 struct crypt_op *cop = &kcop->cop;
596 int rc;
598 if (cop->src == NULL)
599 return -EINVAL;
601 if (ses->alignmask && !IS_ALIGNED((unsigned long)cop->src, ses->alignmask)) {
602 dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
603 __func__, (unsigned long)cop->src, ses->alignmask + 1);
606 src_pagecount = PAGECOUNT(cop->src, cop->len);
607 if (!ses->cdata.init) { /* hashing only */
608 write_src = 0;
609 } else if (cop->src != cop->dst) { /* non-in-situ transformation */
610 if (cop->dst == NULL)
611 return -EINVAL;
613 dst_pagecount = PAGECOUNT(cop->dst, cop->len);
614 write_src = 0;
616 if (ses->alignmask && !IS_ALIGNED((unsigned long)cop->dst, ses->alignmask)) {
617 dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
618 __func__, (unsigned long)cop->dst, ses->alignmask + 1);
622 (*tot_pages) = pagecount = src_pagecount + dst_pagecount;
624 if (pagecount > ses->array_size) {
625 rc = adjust_sg_array(ses, pagecount);
626 if (rc)
627 return rc;
630 rc = __get_userbuf(cop->src, cop->len, write_src, src_pagecount,
631 ses->pages, ses->sg, kcop->task, kcop->mm);
632 if (unlikely(rc)) {
633 dprintk(1, KERN_ERR,
634 "failed to get user pages for data input\n");
635 return -EINVAL;
637 (*src_sg) = (*dst_sg) = ses->sg;
639 if (!dst_pagecount)
640 return 0;
642 (*dst_sg) = ses->sg + src_pagecount;
644 rc = __get_userbuf(cop->dst, cop->len, 1, dst_pagecount,
645 ses->pages + src_pagecount, *dst_sg,
646 kcop->task, kcop->mm);
647 if (unlikely(rc)) {
648 dprintk(1, KERN_ERR,
649 "failed to get user pages for data output\n");
650 release_user_pages(ses->pages, src_pagecount);
651 return -EINVAL;
653 return 0;
656 /* This is the main crypto function - zero-copy edition */
657 static int
658 __crypto_run_zc(struct csession *ses_ptr, struct kernel_crypt_op *kcop)
660 struct scatterlist *src_sg, *dst_sg;
661 struct crypt_op *cop = &kcop->cop;
662 int ret = 0, pagecount;
664 ret = get_userbuf(ses_ptr, kcop, &src_sg, &dst_sg, &pagecount);
665 if (unlikely(ret)) {
666 dprintk(1, KERN_ERR, "Error getting user pages. "
667 "Falling back to non zero copy.\n");
668 return __crypto_run_std(ses_ptr, cop);
671 ret = hash_n_crypt(ses_ptr, cop, src_sg, dst_sg, cop->len);
673 release_user_pages(ses_ptr->pages, pagecount);
674 return ret;
677 static int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop)
679 struct csession *ses_ptr;
680 struct crypt_op *cop = &kcop->cop;
681 int ret;
683 if (unlikely(cop->op != COP_ENCRYPT && cop->op != COP_DECRYPT)) {
684 dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", cop->op);
685 return -EINVAL;
688 /* this also enters ses_ptr->sem */
689 ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
690 if (unlikely(!ses_ptr)) {
691 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
692 return -EINVAL;
695 if (ses_ptr->hdata.init != 0 && !(cop->flags & (COP_FLAG_UPDATE | COP_FLAG_FINAL))) {
696 ret = cryptodev_hash_reset(&ses_ptr->hdata);
697 if (unlikely(ret)) {
698 dprintk(1, KERN_ERR,
699 "error in cryptodev_hash_reset()\n");
700 goto out_unlock;
704 if (ses_ptr->cdata.init != 0) {
705 int blocksize = ses_ptr->cdata.blocksize;
707 if (unlikely(cop->len % blocksize)) {
708 dprintk(1, KERN_ERR,
709 "data size (%u) isn't a multiple "
710 "of block size (%u)\n",
711 cop->len, blocksize);
712 ret = -EINVAL;
713 goto out_unlock;
716 cryptodev_cipher_set_iv(&ses_ptr->cdata, kcop->iv,
717 min(ses_ptr->cdata.ivsize, kcop->ivlen));
720 if (likely(cop->len)) {
721 if (cop->flags & COP_FLAG_NO_ZC)
722 ret = __crypto_run_std(ses_ptr, &kcop->cop);
723 else
724 ret = __crypto_run_zc(ses_ptr, kcop);
725 if (unlikely(ret))
726 goto out_unlock;
729 if (ses_ptr->cdata.init != 0) {
730 cryptodev_cipher_get_iv(&ses_ptr->cdata, kcop->iv,
731 min(ses_ptr->cdata.ivsize, kcop->ivlen));
734 if (ses_ptr->hdata.init != 0 &&
735 ((cop->flags & COP_FLAG_FINAL) ||
736 (!(cop->flags & COP_FLAG_UPDATE) || cop->len == 0))) {
738 ret = cryptodev_hash_final(&ses_ptr->hdata, kcop->hash_output);
739 if (unlikely(ret)) {
740 dprintk(0, KERN_ERR, "CryptoAPI failure: %d\n", ret);
741 goto out_unlock;
743 kcop->digestsize = ses_ptr->hdata.digestsize;
746 #if defined(CRYPTODEV_STATS)
747 if (enable_stats) {
748 /* this is safe - we check cop->op at the function entry */
749 ses_ptr->stat[cop->op] += cop->len;
750 if (ses_ptr->stat_max_size < cop->len)
751 ses_ptr->stat_max_size = cop->len;
752 ses_ptr->stat_count++;
754 #endif
756 out_unlock:
757 crypto_put_session(ses_ptr);
758 return ret;
761 static void cryptask_routine(struct work_struct *work)
763 struct crypt_priv *pcr = container_of(work, struct crypt_priv, cryptask);
764 struct todo_list_item *item;
765 LIST_HEAD(tmp);
767 /* fetch all pending jobs into the temporary list */
768 mutex_lock(&pcr->todo.lock);
769 list_cut_position(&tmp, &pcr->todo.list, pcr->todo.list.prev);
770 mutex_unlock(&pcr->todo.lock);
772 /* handle each job locklessly */
773 list_for_each_entry(item, &tmp, __hook) {
774 item->result = crypto_run(&pcr->fcrypt, &item->kcop);
775 if (unlikely(item->result))
776 dprintk(0, KERN_ERR, "%s: crypto_run() failed: %d\n",
777 __func__, item->result);
780 /* push all handled jobs to the done list at once */
781 mutex_lock(&pcr->done.lock);
782 list_splice_tail(&tmp, &pcr->done.list);
783 mutex_unlock(&pcr->done.lock);
785 /* wake for POLLIN */
786 wake_up_interruptible(&pcr->user_waiter);
789 /* ====== /dev/crypto ====== */
791 static int
792 cryptodev_open(struct inode *inode, struct file *filp)
794 struct todo_list_item *tmp;
795 struct crypt_priv *pcr;
796 int i;
798 pcr = kmalloc(sizeof(*pcr), GFP_KERNEL);
799 if (!pcr)
800 return -ENOMEM;
802 memset(pcr, 0, sizeof(*pcr));
803 mutex_init(&pcr->fcrypt.sem);
804 INIT_LIST_HEAD(&pcr->fcrypt.list);
806 INIT_LIST_HEAD(&pcr->free.list);
807 INIT_LIST_HEAD(&pcr->todo.list);
808 INIT_LIST_HEAD(&pcr->done.list);
809 INIT_WORK(&pcr->cryptask, cryptask_routine);
810 mutex_init(&pcr->free.lock);
811 mutex_init(&pcr->todo.lock);
812 mutex_init(&pcr->done.lock);
813 init_waitqueue_head(&pcr->user_waiter);
815 for (i = 0; i < DEF_COP_RINGSIZE; i++) {
816 tmp = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
817 pcr->itemcount++;
818 dprintk(2, KERN_DEBUG, "%s: allocated new item at %lx\n",
819 __func__, (unsigned long)tmp);
820 list_add(&tmp->__hook, &pcr->free.list);
823 filp->private_data = pcr;
824 dprintk(2, KERN_DEBUG,
825 "Cryptodev handle initialised, %d elements in queue\n",
826 DEF_COP_RINGSIZE);
827 return 0;
830 static int
831 cryptodev_release(struct inode *inode, struct file *filp)
833 struct crypt_priv *pcr = filp->private_data;
834 struct todo_list_item *item, *item_safe;
835 int items_freed = 0;
837 if (!pcr)
838 return 0;
840 cancel_work_sync(&pcr->cryptask);
842 mutex_destroy(&pcr->todo.lock);
843 mutex_destroy(&pcr->done.lock);
844 mutex_destroy(&pcr->free.lock);
846 list_splice_tail(&pcr->todo.list, &pcr->free.list);
847 list_splice_tail(&pcr->done.list, &pcr->free.list);
849 list_for_each_entry_safe(item, item_safe, &pcr->free.list, __hook) {
850 dprintk(2, KERN_DEBUG, "%s: freeing item at %lx\n",
851 __func__, (unsigned long)item);
852 list_del(&item->__hook);
853 kfree(item);
854 items_freed++;
857 if (items_freed != pcr->itemcount) {
858 dprintk(0, KERN_ERR,
859 "%s: freed %d items, but %d should exist!\n",
860 __func__, items_freed, pcr->itemcount);
863 crypto_finish_all_sessions(&pcr->fcrypt);
864 kfree(pcr);
865 filp->private_data = NULL;
867 dprintk(2, KERN_DEBUG,
868 "Cryptodev handle deinitialised, %d elements freed\n",
869 items_freed);
870 return 0;
873 static int
874 clonefd(struct file *filp)
876 int ret;
877 ret = get_unused_fd();
878 if (ret >= 0) {
879 get_file(filp);
880 fd_install(ret, filp);
883 return ret;
886 /* enqueue a job for asynchronous completion
888 * returns:
889 * -EBUSY when there are no free queue slots left
890 * (and the number of slots has reached it MAX_COP_RINGSIZE)
891 * -EFAULT when there was a memory allocation error
892 * 0 on success */
893 static int crypto_async_run(struct crypt_priv *pcr, struct kernel_crypt_op *kcop)
895 struct todo_list_item *item = NULL;
897 mutex_lock(&pcr->free.lock);
898 if (likely(!list_empty(&pcr->free.list))) {
899 item = list_first_entry(&pcr->free.list,
900 struct todo_list_item, __hook);
901 list_del(&item->__hook);
902 } else if (pcr->itemcount < MAX_COP_RINGSIZE) {
903 pcr->itemcount++;
904 } else {
905 mutex_unlock(&pcr->free.lock);
906 return -EBUSY;
908 mutex_unlock(&pcr->free.lock);
910 if (unlikely(!item)) {
911 item = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
912 if (unlikely(!item))
913 return -EFAULT;
914 dprintk(1, KERN_INFO, "%s: increased item count to %d\n",
915 __func__, pcr->itemcount);
918 memcpy(&item->kcop, kcop, sizeof(struct kernel_crypt_op));
920 mutex_lock(&pcr->todo.lock);
921 list_add_tail(&item->__hook, &pcr->todo.list);
922 mutex_unlock(&pcr->todo.lock);
924 queue_work(cryptodev_wq, &pcr->cryptask);
925 return 0;
928 /* get the first completed job from the "done" queue
930 * returns:
931 * -EBUSY if no completed jobs are ready (yet)
932 * the return value of crypto_run() otherwise */
933 static int crypto_async_fetch(struct crypt_priv *pcr,
934 struct kernel_crypt_op *kcop)
936 struct todo_list_item *item;
937 int retval;
939 mutex_lock(&pcr->done.lock);
940 if (list_empty(&pcr->done.list)) {
941 mutex_unlock(&pcr->done.lock);
942 return -EBUSY;
944 item = list_first_entry(&pcr->done.list, struct todo_list_item, __hook);
945 list_del(&item->__hook);
946 mutex_unlock(&pcr->done.lock);
948 memcpy(kcop, &item->kcop, sizeof(struct kernel_crypt_op));
949 retval = item->result;
951 mutex_lock(&pcr->free.lock);
952 list_add_tail(&item->__hook, &pcr->free.list);
953 mutex_unlock(&pcr->free.lock);
955 /* wake for POLLOUT */
956 wake_up_interruptible(&pcr->user_waiter);
958 return retval;
961 /* this function has to be called from process context */
962 static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
964 struct crypt_op *cop = &kcop->cop;
965 struct csession *ses_ptr;
966 int rc;
968 /* this also enters ses_ptr->sem */
969 ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
970 if (unlikely(!ses_ptr)) {
971 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
972 return -EINVAL;
974 kcop->ivlen = cop->iv ? ses_ptr->cdata.ivsize : 0;
975 kcop->digestsize = 0; /* will be updated during operation */
977 crypto_put_session(ses_ptr);
979 kcop->task = current;
980 kcop->mm = current->mm;
982 if (cop->iv) {
983 rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
984 if (unlikely(rc)) {
985 dprintk(1, KERN_ERR,
986 "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
987 kcop->ivlen, rc, (unsigned long)cop->iv);
988 return -EFAULT;
992 return 0;
995 /* this function has to be called from process context */
996 static int fill_cop_from_kcop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
998 int ret;
1000 if (kcop->digestsize) {
1001 ret = copy_to_user(kcop->cop.mac,
1002 kcop->hash_output, kcop->digestsize);
1003 if (unlikely(ret))
1004 return -EFAULT;
1006 if (kcop->ivlen && kcop->cop.flags & COP_FLAG_WRITE_IV) {
1007 ret = copy_to_user(kcop->cop.iv,
1008 kcop->iv, kcop->ivlen);
1009 if (unlikely(ret))
1010 return -EFAULT;
1012 return 0;
1015 static int kcop_from_user(struct kernel_crypt_op *kcop,
1016 struct fcrypt *fcr, void __user *arg)
1018 if (unlikely(copy_from_user(&kcop->cop, arg, sizeof(kcop->cop))))
1019 return -EFAULT;
1021 return fill_kcop_from_cop(kcop, fcr);
1024 static int kcop_to_user(struct kernel_crypt_op *kcop,
1025 struct fcrypt *fcr, void __user *arg)
1027 int ret;
1029 ret = fill_cop_from_kcop(kcop, fcr);
1030 if (unlikely(ret))
1031 return ret;
1033 if (unlikely(copy_to_user(arg, &kcop->cop, sizeof(kcop->cop))))
1034 return -EFAULT;
1035 return 0;
1038 static inline void tfm_info_to_alg_info(struct alg_info *dst, struct crypto_tfm *tfm)
1040 snprintf(dst->cra_name, CRYPTODEV_MAX_ALG_NAME,
1041 "%s", crypto_tfm_alg_name(tfm));
1042 snprintf(dst->cra_driver_name, CRYPTODEV_MAX_ALG_NAME,
1043 "%s", crypto_tfm_alg_driver_name(tfm));
1046 static int get_session_info(struct fcrypt *fcr, struct session_info_op *siop)
1048 struct csession *ses_ptr;
1050 /* this also enters ses_ptr->sem */
1051 ses_ptr = crypto_get_session_by_sid(fcr, siop->ses);
1052 if (unlikely(!ses_ptr)) {
1053 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", siop->ses);
1054 return -EINVAL;
1057 if (ses_ptr->cdata.init) {
1058 tfm_info_to_alg_info(&siop->cipher_info,
1059 crypto_ablkcipher_tfm(ses_ptr->cdata.async.s));
1061 if (ses_ptr->hdata.init) {
1062 tfm_info_to_alg_info(&siop->hash_info,
1063 crypto_ahash_tfm(ses_ptr->hdata.async.s));
1066 siop->alignmask = ses_ptr->alignmask;
1068 crypto_put_session(ses_ptr);
1069 return 0;
1072 static long
1073 cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
1075 void __user *arg = (void __user *)arg_;
1076 int __user *p = arg;
1077 struct session_op sop;
1078 struct kernel_crypt_op kcop;
1079 struct kernel_crypt_auth_op kcaop;
1080 struct crypt_priv *pcr = filp->private_data;
1081 struct fcrypt *fcr;
1082 struct session_info_op siop;
1083 uint32_t ses;
1084 int ret, fd;
1086 if (unlikely(!pcr))
1087 BUG();
1089 fcr = &pcr->fcrypt;
1091 switch (cmd) {
1092 case CIOCASYMFEAT:
1093 return put_user(0, p);
1094 case CRIOGET:
1095 fd = clonefd(filp);
1096 ret = put_user(fd, p);
1097 if (unlikely(ret)) {
1098 sys_close(fd);
1099 return ret;
1101 return ret;
1102 case CIOCGSESSION:
1103 if (unlikely(copy_from_user(&sop, arg, sizeof(sop))))
1104 return -EFAULT;
1106 ret = crypto_create_session(fcr, &sop);
1107 if (unlikely(ret))
1108 return ret;
1109 ret = copy_to_user(arg, &sop, sizeof(sop));
1110 if (unlikely(ret)) {
1111 crypto_finish_session(fcr, sop.ses);
1112 return -EFAULT;
1114 return ret;
1115 case CIOCFSESSION:
1116 ret = get_user(ses, (uint32_t __user *)arg);
1117 if (unlikely(ret))
1118 return ret;
1119 ret = crypto_finish_session(fcr, ses);
1120 return ret;
1121 case CIOCGSESSINFO:
1122 if (unlikely(copy_from_user(&siop, arg, sizeof(siop))))
1123 return -EFAULT;
1125 ret = get_session_info(fcr, &siop);
1126 if (unlikely(ret))
1127 return ret;
1128 return copy_to_user(arg, &siop, sizeof(siop));
1129 case CIOCCRYPT:
1130 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg))) {
1131 dprintk(1, KERN_WARNING, "Error copying from user");
1132 return ret;
1135 ret = crypto_run(fcr, &kcop);
1136 if (unlikely(ret)) {
1137 dprintk(1, KERN_WARNING, "Error in crypto_run");
1138 return ret;
1141 return kcop_to_user(&kcop, fcr, arg);
1142 case CIOCAUTHCRYPT:
1143 if (unlikely(ret = kcaop_from_user(&kcaop, fcr, arg))) {
1144 dprintk(1, KERN_WARNING, "Error copying from user");
1145 return ret;
1148 ret = crypto_auth_run(fcr, &kcaop);
1149 if (unlikely(ret)) {
1150 dprintk(1, KERN_WARNING, "Error in crypto_auth_run");
1151 return ret;
1153 return kcaop_to_user(&kcaop, fcr, arg);
1154 case CIOCASYNCCRYPT:
1155 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
1156 return ret;
1158 return crypto_async_run(pcr, &kcop);
1159 case CIOCASYNCFETCH:
1160 ret = crypto_async_fetch(pcr, &kcop);
1161 if (unlikely(ret))
1162 return ret;
1164 return kcop_to_user(&kcop, fcr, arg);
1165 default:
1166 return -EINVAL;
1170 /* compatibility code for 32bit userlands */
1171 #ifdef CONFIG_COMPAT
1173 static inline void
1174 compat_to_session_op(struct compat_session_op *compat, struct session_op *sop)
1176 sop->cipher = compat->cipher;
1177 sop->mac = compat->mac;
1178 sop->keylen = compat->keylen;
1180 sop->key = compat_ptr(compat->key);
1181 sop->mackeylen = compat->mackeylen;
1182 sop->mackey = compat_ptr(compat->mackey);
1183 sop->ses = compat->ses;
1186 static inline void
1187 session_op_to_compat(struct session_op *sop, struct compat_session_op *compat)
1189 compat->cipher = sop->cipher;
1190 compat->mac = sop->mac;
1191 compat->keylen = sop->keylen;
1193 compat->key = ptr_to_compat(sop->key);
1194 compat->mackeylen = sop->mackeylen;
1195 compat->mackey = ptr_to_compat(sop->mackey);
1196 compat->ses = sop->ses;
1199 static inline void
1200 compat_to_crypt_op(struct compat_crypt_op *compat, struct crypt_op *cop)
1202 cop->ses = compat->ses;
1203 cop->op = compat->op;
1204 cop->flags = compat->flags;
1205 cop->len = compat->len;
1207 cop->src = compat_ptr(compat->src);
1208 cop->dst = compat_ptr(compat->dst);
1209 cop->mac = compat_ptr(compat->mac);
1210 cop->iv = compat_ptr(compat->iv);
1213 static inline void
1214 crypt_op_to_compat(struct crypt_op *cop, struct compat_crypt_op *compat)
1216 compat->ses = cop->ses;
1217 compat->op = cop->op;
1218 compat->flags = cop->flags;
1219 compat->len = cop->len;
1221 compat->src = ptr_to_compat(cop->src);
1222 compat->dst = ptr_to_compat(cop->dst);
1223 compat->mac = ptr_to_compat(cop->mac);
1224 compat->iv = ptr_to_compat(cop->iv);
1227 static int compat_kcop_from_user(struct kernel_crypt_op *kcop,
1228 struct fcrypt *fcr, void __user *arg)
1230 struct compat_crypt_op compat_cop;
1232 if (unlikely(copy_from_user(&compat_cop, arg, sizeof(compat_cop))))
1233 return -EFAULT;
1234 compat_to_crypt_op(&compat_cop, &kcop->cop);
1236 return fill_kcop_from_cop(kcop, fcr);
1239 static int compat_kcop_to_user(struct kernel_crypt_op *kcop,
1240 struct fcrypt *fcr, void __user *arg)
1242 int ret;
1243 struct compat_crypt_op compat_cop;
1245 ret = fill_cop_from_kcop(kcop, fcr);
1246 if (unlikely(ret)) {
1247 dprintk(1, KERN_WARNING, "Error in fill_cop_from_kcop");
1248 return ret;
1250 crypt_op_to_compat(&kcop->cop, &compat_cop);
1252 if (unlikely(copy_to_user(arg, &compat_cop, sizeof(compat_cop)))) {
1253 dprintk(1, KERN_WARNING, "Error copying to user");
1254 return -EFAULT;
1256 return 0;
1259 static long
1260 cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
1262 void __user *arg = (void __user *)arg_;
1263 struct crypt_priv *pcr = file->private_data;
1264 struct fcrypt *fcr;
1265 struct session_op sop;
1266 struct compat_session_op compat_sop;
1267 struct kernel_crypt_op kcop;
1268 int ret;
1270 if (unlikely(!pcr))
1271 BUG();
1273 fcr = &pcr->fcrypt;
1275 switch (cmd) {
1276 case CIOCASYMFEAT:
1277 case CRIOGET:
1278 case CIOCFSESSION:
1279 case CIOCGSESSINFO:
1280 return cryptodev_ioctl(file, cmd, arg_);
1282 case COMPAT_CIOCGSESSION:
1283 if (unlikely(copy_from_user(&compat_sop, arg,
1284 sizeof(compat_sop))))
1285 return -EFAULT;
1286 compat_to_session_op(&compat_sop, &sop);
1288 ret = crypto_create_session(fcr, &sop);
1289 if (unlikely(ret))
1290 return ret;
1292 session_op_to_compat(&sop, &compat_sop);
1293 ret = copy_to_user(arg, &compat_sop, sizeof(compat_sop));
1294 if (unlikely(ret)) {
1295 crypto_finish_session(fcr, sop.ses);
1296 return -EFAULT;
1298 return ret;
1300 case COMPAT_CIOCCRYPT:
1301 ret = compat_kcop_from_user(&kcop, fcr, arg);
1302 if (unlikely(ret))
1303 return ret;
1305 ret = crypto_run(fcr, &kcop);
1306 if (unlikely(ret))
1307 return ret;
1309 return compat_kcop_to_user(&kcop, fcr, arg);
1310 case COMPAT_CIOCASYNCCRYPT:
1311 if (unlikely(ret = compat_kcop_from_user(&kcop, fcr, arg)))
1312 return ret;
1314 return crypto_async_run(pcr, &kcop);
1315 case COMPAT_CIOCASYNCFETCH:
1316 ret = crypto_async_fetch(pcr, &kcop);
1317 if (unlikely(ret))
1318 return ret;
1320 return compat_kcop_to_user(&kcop, fcr, arg);
1322 default:
1323 return -EINVAL;
1327 #endif /* CONFIG_COMPAT */
1329 static unsigned int cryptodev_poll(struct file *file, poll_table *wait)
1331 struct crypt_priv *pcr = file->private_data;
1332 int ret = 0;
1334 poll_wait(file, &pcr->user_waiter, wait);
1336 if (!list_empty_careful(&pcr->done.list))
1337 ret |= POLLIN | POLLRDNORM;
1338 if (!list_empty_careful(&pcr->free.list) || pcr->itemcount < MAX_COP_RINGSIZE)
1339 ret |= POLLOUT | POLLWRNORM;
1341 return ret;
1344 static const struct file_operations cryptodev_fops = {
1345 .owner = THIS_MODULE,
1346 .open = cryptodev_open,
1347 .release = cryptodev_release,
1348 .unlocked_ioctl = cryptodev_ioctl,
1349 #ifdef CONFIG_COMPAT
1350 .compat_ioctl = cryptodev_compat_ioctl,
1351 #endif /* CONFIG_COMPAT */
1352 .poll = cryptodev_poll,
1355 static struct miscdevice cryptodev = {
1356 .minor = MISC_DYNAMIC_MINOR,
1357 .name = "crypto",
1358 .fops = &cryptodev_fops,
1361 static int __init
1362 cryptodev_register(void)
1364 int rc;
1366 rc = misc_register(&cryptodev);
1367 if (unlikely(rc)) {
1368 printk(KERN_ERR PFX "registration of /dev/crypto failed\n");
1369 return rc;
1372 return 0;
1375 static void __exit
1376 cryptodev_deregister(void)
1378 misc_deregister(&cryptodev);
1381 /* ====== Module init/exit ====== */
1382 static int __init init_cryptodev(void)
1384 int rc;
1386 cryptodev_wq = create_workqueue("cryptodev_queue");
1387 if (unlikely(!cryptodev_wq)) {
1388 printk(KERN_ERR PFX "failed to allocate the cryptodev workqueue\n");
1389 return -EFAULT;
1392 rc = cryptodev_register();
1393 if (unlikely(rc)) {
1394 destroy_workqueue(cryptodev_wq);
1395 return rc;
1398 printk(KERN_INFO PFX "driver %s loaded.\n", VERSION);
1400 return 0;
1403 static void __exit exit_cryptodev(void)
1405 flush_workqueue(cryptodev_wq);
1406 destroy_workqueue(cryptodev_wq);
1408 cryptodev_deregister();
1409 printk(KERN_INFO PFX "driver unloaded.\n");
1412 module_init(init_cryptodev);
1413 module_exit(exit_cryptodev);