bumped versions
[cryptodev-linux.git] / ioctl.c
blobd98bf28aad2560cfe8b9803f4ed3f70f4f6be80a
1 /*
2 * Driver for /dev/crypto device (aka CryptoDev)
4 * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
5 * Copyright (c) 2009,2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
6 * Copyright (c) 2010 Phil Sutter
8 * This file is part of linux cryptodev.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc.,
23 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
27 * Device /dev/crypto provides an interface for
28 * accessing kernel CryptoAPI algorithms (ciphers,
29 * hashes) from userspace programs.
31 * /dev/crypto interface was originally introduced in
32 * OpenBSD and this module attempts to keep the API.
36 #include <crypto/hash.h>
37 #include <linux/crypto.h>
38 #include <linux/mm.h>
39 #include <linux/highmem.h>
40 #include <linux/ioctl.h>
41 #include <linux/random.h>
42 #include <linux/syscalls.h>
43 #include <linux/pagemap.h>
44 #include <linux/poll.h>
45 #include <linux/uaccess.h>
46 #include <crypto/cryptodev.h>
47 #include <linux/scatterlist.h>
48 #include "cryptodev_int.h"
49 #include "zc.h"
50 #include "version.h"
52 MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
53 MODULE_DESCRIPTION("CryptoDev driver");
54 MODULE_LICENSE("GPL");
56 /* ====== Compile-time config ====== */
58 /* Default (pre-allocated) and maximum size of the job queue.
59 * These are free, pending and done items all together. */
60 #define DEF_COP_RINGSIZE 16
61 #define MAX_COP_RINGSIZE 64
63 /* ====== Module parameters ====== */
65 int cryptodev_verbosity;
66 module_param(cryptodev_verbosity, int, 0644);
67 MODULE_PARM_DESC(cryptodev_verbosity, "0: normal, 1: verbose, 2: debug");
69 /* ====== CryptoAPI ====== */
70 struct todo_list_item {
71 struct list_head __hook;
72 struct kernel_crypt_op kcop;
73 int result;
76 struct locked_list {
77 struct list_head list;
78 struct mutex lock;
81 struct crypt_priv {
82 struct fcrypt fcrypt;
83 struct locked_list free, todo, done;
84 int itemcount;
85 struct work_struct cryptask;
86 wait_queue_head_t user_waiter;
/* Initialise one scatterlist entry from a kernel virtual address.
 * NOTE(review): appears unused in this file — candidate for removal. */
#define FILL_SG(sg, ptr, len)				\
	do {						\
		(sg)->page = virt_to_page(ptr);		\
		(sg)->offset = offset_in_page(ptr);	\
		(sg)->length = len;			\
		(sg)->dma_address = 0;			\
	} while (0)
97 /* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
98 static struct workqueue_struct *cryptodev_wq;
100 /* Prepare session for future use. */
101 static int
102 crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
104 struct csession *ses_new = NULL, *ses_ptr;
105 int ret = 0;
106 const char *alg_name = NULL;
107 const char *hash_name = NULL;
108 int hmac_mode = 1, stream = 0, aead = 0;
110 /* Does the request make sense? */
111 if (unlikely(!sop->cipher && !sop->mac)) {
112 dprintk(1, KERN_DEBUG, "Both 'cipher' and 'mac' unset.\n");
113 return -EINVAL;
116 switch (sop->cipher) {
117 case 0:
118 break;
119 case CRYPTO_DES_CBC:
120 alg_name = "cbc(des)";
121 break;
122 case CRYPTO_3DES_CBC:
123 alg_name = "cbc(des3_ede)";
124 break;
125 case CRYPTO_BLF_CBC:
126 alg_name = "cbc(blowfish)";
127 break;
128 case CRYPTO_AES_CBC:
129 alg_name = "cbc(aes)";
130 break;
131 case CRYPTO_AES_ECB:
132 alg_name = "ecb(aes)";
133 break;
134 case CRYPTO_CAMELLIA_CBC:
135 alg_name = "cbc(camellia)";
136 break;
137 case CRYPTO_AES_CTR:
138 alg_name = "ctr(aes)";
139 stream = 1;
140 break;
141 case CRYPTO_AES_GCM:
142 alg_name = "gcm(aes)";
143 stream = 1;
144 aead = 1;
145 break;
146 case CRYPTO_NULL:
147 alg_name = "ecb(cipher_null)";
148 stream = 1;
149 break;
150 default:
151 dprintk(1, KERN_DEBUG, "%s: bad cipher: %d\n", __func__,
152 sop->cipher);
153 return -EINVAL;
156 switch (sop->mac) {
157 case 0:
158 break;
159 case CRYPTO_MD5_HMAC:
160 hash_name = "hmac(md5)";
161 break;
162 case CRYPTO_RIPEMD160_HMAC:
163 hash_name = "hmac(rmd160)";
164 break;
165 case CRYPTO_SHA1_HMAC:
166 hash_name = "hmac(sha1)";
167 break;
168 case CRYPTO_SHA2_256_HMAC:
169 hash_name = "hmac(sha256)";
170 break;
171 case CRYPTO_SHA2_384_HMAC:
172 hash_name = "hmac(sha384)";
173 break;
174 case CRYPTO_SHA2_512_HMAC:
175 hash_name = "hmac(sha512)";
176 break;
178 /* non-hmac cases */
179 case CRYPTO_MD5:
180 hash_name = "md5";
181 hmac_mode = 0;
182 break;
183 case CRYPTO_RIPEMD160:
184 hash_name = "rmd160";
185 hmac_mode = 0;
186 break;
187 case CRYPTO_SHA1:
188 hash_name = "sha1";
189 hmac_mode = 0;
190 break;
191 case CRYPTO_SHA2_256:
192 hash_name = "sha256";
193 hmac_mode = 0;
194 break;
195 case CRYPTO_SHA2_384:
196 hash_name = "sha384";
197 hmac_mode = 0;
198 break;
199 case CRYPTO_SHA2_512:
200 hash_name = "sha512";
201 hmac_mode = 0;
202 break;
203 default:
204 dprintk(1, KERN_DEBUG, "%s: bad mac: %d\n", __func__,
205 sop->mac);
206 return -EINVAL;
209 /* Create a session and put it to the list. */
210 ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
211 if (!ses_new)
212 return -ENOMEM;
214 /* Set-up crypto transform. */
215 if (alg_name) {
216 uint8_t keyp[CRYPTO_CIPHER_MAX_KEY_LEN];
218 if (unlikely(sop->keylen > CRYPTO_CIPHER_MAX_KEY_LEN)) {
219 dprintk(1, KERN_DEBUG,
220 "Setting key failed for %s-%zu.\n",
221 alg_name, (size_t)sop->keylen*8);
222 ret = -EINVAL;
223 goto error_cipher;
226 if (unlikely(copy_from_user(keyp, sop->key, sop->keylen))) {
227 ret = -EFAULT;
228 goto error_cipher;
231 ret = cryptodev_cipher_init(&ses_new->cdata, alg_name, keyp,
232 sop->keylen, stream, aead);
233 if (ret < 0) {
234 dprintk(1, KERN_DEBUG,
235 "%s: Failed to load cipher for %s\n",
236 __func__, alg_name);
237 ret = -EINVAL;
238 goto error_cipher;
242 if (hash_name && aead == 0) {
243 uint8_t keyp[CRYPTO_HMAC_MAX_KEY_LEN];
245 if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN)) {
246 dprintk(1, KERN_DEBUG,
247 "Setting key failed for %s-%zu.\n",
248 alg_name, (size_t)sop->mackeylen*8);
249 ret = -EINVAL;
250 goto error_hash;
253 if (sop->mackey && unlikely(copy_from_user(keyp, sop->mackey,
254 sop->mackeylen))) {
255 ret = -EFAULT;
256 goto error_hash;
259 ret = cryptodev_hash_init(&ses_new->hdata, hash_name, hmac_mode,
260 keyp, sop->mackeylen);
261 if (ret != 0) {
262 dprintk(1, KERN_DEBUG,
263 "%s: Failed to load hash for %s\n",
264 __func__, hash_name);
265 ret = -EINVAL;
266 goto error_hash;
270 ses_new->alignmask = max(ses_new->cdata.alignmask,
271 ses_new->hdata.alignmask);
272 dprintk(2, KERN_DEBUG, "%s: got alignmask %d\n", __func__, ses_new->alignmask);
274 ses_new->array_size = DEFAULT_PREALLOC_PAGES;
275 dprintk(2, KERN_DEBUG, "%s: preallocating for %d user pages\n",
276 __func__, ses_new->array_size);
277 ses_new->pages = kzalloc(ses_new->array_size *
278 sizeof(struct page *), GFP_KERNEL);
279 ses_new->sg = kzalloc(ses_new->array_size *
280 sizeof(struct scatterlist), GFP_KERNEL);
281 if (ses_new->sg == NULL || ses_new->pages == NULL) {
282 dprintk(0, KERN_DEBUG, "Memory error\n");
283 ret = -ENOMEM;
284 goto error_hash;
287 /* put the new session to the list */
288 get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
289 mutex_init(&ses_new->sem);
291 mutex_lock(&fcr->sem);
292 restart:
293 list_for_each_entry(ses_ptr, &fcr->list, entry) {
294 /* Check for duplicate SID */
295 if (unlikely(ses_new->sid == ses_ptr->sid)) {
296 get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
297 /* Unless we have a broken RNG this
298 shouldn't loop forever... ;-) */
299 goto restart;
303 list_add(&ses_new->entry, &fcr->list);
304 mutex_unlock(&fcr->sem);
306 /* Fill in some values for the user. */
307 sop->ses = ses_new->sid;
309 return 0;
311 error_hash:
312 cryptodev_cipher_deinit(&ses_new->cdata);
313 kfree(ses_new->sg);
314 kfree(ses_new->pages);
315 error_cipher:
316 kfree(ses_new);
318 return ret;
322 /* Everything that needs to be done when remowing a session. */
323 static inline void
324 crypto_destroy_session(struct csession *ses_ptr)
326 if (!mutex_trylock(&ses_ptr->sem)) {
327 dprintk(2, KERN_DEBUG, "Waiting for semaphore of sid=0x%08X\n",
328 ses_ptr->sid);
329 mutex_lock(&ses_ptr->sem);
331 dprintk(2, KERN_DEBUG, "Removed session 0x%08X\n", ses_ptr->sid);
332 cryptodev_cipher_deinit(&ses_ptr->cdata);
333 cryptodev_hash_deinit(&ses_ptr->hdata);
334 dprintk(2, KERN_DEBUG, "%s: freeing space for %d user pages\n",
335 __func__, ses_ptr->array_size);
336 kfree(ses_ptr->pages);
337 kfree(ses_ptr->sg);
338 mutex_unlock(&ses_ptr->sem);
339 kfree(ses_ptr);
342 /* Look up a session by ID and remove. */
343 static int
344 crypto_finish_session(struct fcrypt *fcr, uint32_t sid)
346 struct csession *tmp, *ses_ptr;
347 struct list_head *head;
348 int ret = 0;
350 mutex_lock(&fcr->sem);
351 head = &fcr->list;
352 list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
353 if (ses_ptr->sid == sid) {
354 list_del(&ses_ptr->entry);
355 crypto_destroy_session(ses_ptr);
356 break;
360 if (unlikely(!ses_ptr)) {
361 dprintk(1, KERN_ERR, "Session with sid=0x%08X not found!\n",
362 sid);
363 ret = -ENOENT;
365 mutex_unlock(&fcr->sem);
367 return ret;
370 /* Remove all sessions when closing the file */
371 static int
372 crypto_finish_all_sessions(struct fcrypt *fcr)
374 struct csession *tmp, *ses_ptr;
375 struct list_head *head;
377 mutex_lock(&fcr->sem);
379 head = &fcr->list;
380 list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
381 list_del(&ses_ptr->entry);
382 crypto_destroy_session(ses_ptr);
384 mutex_unlock(&fcr->sem);
386 return 0;
389 /* Look up session by session ID. The returned session is locked. */
390 struct csession *
391 crypto_get_session_by_sid(struct fcrypt *fcr, uint32_t sid)
393 struct csession *ses_ptr, *retval = NULL;
395 if (unlikely(fcr == NULL))
396 return NULL;
398 mutex_lock(&fcr->sem);
399 list_for_each_entry(ses_ptr, &fcr->list, entry) {
400 if (ses_ptr->sid == sid) {
401 mutex_lock(&ses_ptr->sem);
402 retval = ses_ptr;
403 break;
406 mutex_unlock(&fcr->sem);
408 return retval;
411 static void cryptask_routine(struct work_struct *work)
413 struct crypt_priv *pcr = container_of(work, struct crypt_priv, cryptask);
414 struct todo_list_item *item;
415 LIST_HEAD(tmp);
417 /* fetch all pending jobs into the temporary list */
418 mutex_lock(&pcr->todo.lock);
419 list_cut_position(&tmp, &pcr->todo.list, pcr->todo.list.prev);
420 mutex_unlock(&pcr->todo.lock);
422 /* handle each job locklessly */
423 list_for_each_entry(item, &tmp, __hook) {
424 item->result = crypto_run(&pcr->fcrypt, &item->kcop);
425 if (unlikely(item->result))
426 dprintk(0, KERN_ERR, "%s: crypto_run() failed: %d\n",
427 __func__, item->result);
430 /* push all handled jobs to the done list at once */
431 mutex_lock(&pcr->done.lock);
432 list_splice_tail(&tmp, &pcr->done.list);
433 mutex_unlock(&pcr->done.lock);
435 /* wake for POLLIN */
436 wake_up_interruptible(&pcr->user_waiter);
439 /* ====== /dev/crypto ====== */
441 static int
442 cryptodev_open(struct inode *inode, struct file *filp)
444 struct todo_list_item *tmp;
445 struct crypt_priv *pcr;
446 int i;
448 pcr = kmalloc(sizeof(*pcr), GFP_KERNEL);
449 if (!pcr)
450 return -ENOMEM;
452 memset(pcr, 0, sizeof(*pcr));
453 mutex_init(&pcr->fcrypt.sem);
454 INIT_LIST_HEAD(&pcr->fcrypt.list);
456 INIT_LIST_HEAD(&pcr->free.list);
457 INIT_LIST_HEAD(&pcr->todo.list);
458 INIT_LIST_HEAD(&pcr->done.list);
459 INIT_WORK(&pcr->cryptask, cryptask_routine);
460 mutex_init(&pcr->free.lock);
461 mutex_init(&pcr->todo.lock);
462 mutex_init(&pcr->done.lock);
463 init_waitqueue_head(&pcr->user_waiter);
465 for (i = 0; i < DEF_COP_RINGSIZE; i++) {
466 tmp = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
467 pcr->itemcount++;
468 dprintk(2, KERN_DEBUG, "%s: allocated new item at %lx\n",
469 __func__, (unsigned long)tmp);
470 list_add(&tmp->__hook, &pcr->free.list);
473 filp->private_data = pcr;
474 dprintk(2, KERN_DEBUG,
475 "Cryptodev handle initialised, %d elements in queue\n",
476 DEF_COP_RINGSIZE);
477 return 0;
480 static int
481 cryptodev_release(struct inode *inode, struct file *filp)
483 struct crypt_priv *pcr = filp->private_data;
484 struct todo_list_item *item, *item_safe;
485 int items_freed = 0;
487 if (!pcr)
488 return 0;
490 cancel_work_sync(&pcr->cryptask);
492 mutex_destroy(&pcr->todo.lock);
493 mutex_destroy(&pcr->done.lock);
494 mutex_destroy(&pcr->free.lock);
496 list_splice_tail(&pcr->todo.list, &pcr->free.list);
497 list_splice_tail(&pcr->done.list, &pcr->free.list);
499 list_for_each_entry_safe(item, item_safe, &pcr->free.list, __hook) {
500 dprintk(2, KERN_DEBUG, "%s: freeing item at %lx\n",
501 __func__, (unsigned long)item);
502 list_del(&item->__hook);
503 kfree(item);
504 items_freed++;
507 if (items_freed != pcr->itemcount) {
508 dprintk(0, KERN_ERR,
509 "%s: freed %d items, but %d should exist!\n",
510 __func__, items_freed, pcr->itemcount);
513 crypto_finish_all_sessions(&pcr->fcrypt);
514 kfree(pcr);
515 filp->private_data = NULL;
517 dprintk(2, KERN_DEBUG,
518 "Cryptodev handle deinitialised, %d elements freed\n",
519 items_freed);
520 return 0;
/* Duplicate filp into a fresh file descriptor for the caller.
 * Returns the new fd, or a negative errno from get_unused_fd(). */
static int
clonefd(struct file *filp)
{
	int fd = get_unused_fd();

	if (fd >= 0) {
		get_file(filp);		/* fd_install consumes a reference */
		fd_install(fd, filp);
	}
	return fd;
}
536 /* enqueue a job for asynchronous completion
538 * returns:
539 * -EBUSY when there are no free queue slots left
540 * (and the number of slots has reached it MAX_COP_RINGSIZE)
541 * -EFAULT when there was a memory allocation error
542 * 0 on success */
543 static int crypto_async_run(struct crypt_priv *pcr, struct kernel_crypt_op *kcop)
545 struct todo_list_item *item = NULL;
547 mutex_lock(&pcr->free.lock);
548 if (likely(!list_empty(&pcr->free.list))) {
549 item = list_first_entry(&pcr->free.list,
550 struct todo_list_item, __hook);
551 list_del(&item->__hook);
552 } else if (pcr->itemcount < MAX_COP_RINGSIZE) {
553 pcr->itemcount++;
554 } else {
555 mutex_unlock(&pcr->free.lock);
556 return -EBUSY;
558 mutex_unlock(&pcr->free.lock);
560 if (unlikely(!item)) {
561 item = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
562 if (unlikely(!item))
563 return -EFAULT;
564 dprintk(1, KERN_INFO, "%s: increased item count to %d\n",
565 __func__, pcr->itemcount);
568 memcpy(&item->kcop, kcop, sizeof(struct kernel_crypt_op));
570 mutex_lock(&pcr->todo.lock);
571 list_add_tail(&item->__hook, &pcr->todo.list);
572 mutex_unlock(&pcr->todo.lock);
574 queue_work(cryptodev_wq, &pcr->cryptask);
575 return 0;
578 /* get the first completed job from the "done" queue
580 * returns:
581 * -EBUSY if no completed jobs are ready (yet)
582 * the return value of crypto_run() otherwise */
583 static int crypto_async_fetch(struct crypt_priv *pcr,
584 struct kernel_crypt_op *kcop)
586 struct todo_list_item *item;
587 int retval;
589 mutex_lock(&pcr->done.lock);
590 if (list_empty(&pcr->done.list)) {
591 mutex_unlock(&pcr->done.lock);
592 return -EBUSY;
594 item = list_first_entry(&pcr->done.list, struct todo_list_item, __hook);
595 list_del(&item->__hook);
596 mutex_unlock(&pcr->done.lock);
598 memcpy(kcop, &item->kcop, sizeof(struct kernel_crypt_op));
599 retval = item->result;
601 mutex_lock(&pcr->free.lock);
602 list_add_tail(&item->__hook, &pcr->free.list);
603 mutex_unlock(&pcr->free.lock);
605 /* wake for POLLOUT */
606 wake_up_interruptible(&pcr->user_waiter);
608 return retval;
611 /* this function has to be called from process context */
612 static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
614 struct crypt_op *cop = &kcop->cop;
615 struct csession *ses_ptr;
616 int rc;
618 /* this also enters ses_ptr->sem */
619 ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
620 if (unlikely(!ses_ptr)) {
621 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
622 return -EINVAL;
624 kcop->ivlen = cop->iv ? ses_ptr->cdata.ivsize : 0;
625 kcop->digestsize = 0; /* will be updated during operation */
627 crypto_put_session(ses_ptr);
629 kcop->task = current;
630 kcop->mm = current->mm;
632 if (cop->iv) {
633 rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
634 if (unlikely(rc)) {
635 dprintk(1, KERN_ERR,
636 "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
637 kcop->ivlen, rc, (unsigned long)cop->iv);
638 return -EFAULT;
642 return 0;
645 /* this function has to be called from process context */
646 static int fill_cop_from_kcop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
648 int ret;
650 if (kcop->digestsize) {
651 ret = copy_to_user(kcop->cop.mac,
652 kcop->hash_output, kcop->digestsize);
653 if (unlikely(ret))
654 return -EFAULT;
656 if (kcop->ivlen && kcop->cop.flags & COP_FLAG_WRITE_IV) {
657 ret = copy_to_user(kcop->cop.iv,
658 kcop->iv, kcop->ivlen);
659 if (unlikely(ret))
660 return -EFAULT;
662 return 0;
665 static int kcop_from_user(struct kernel_crypt_op *kcop,
666 struct fcrypt *fcr, void __user *arg)
668 if (unlikely(copy_from_user(&kcop->cop, arg, sizeof(kcop->cop))))
669 return -EFAULT;
671 return fill_kcop_from_cop(kcop, fcr);
674 static int kcop_to_user(struct kernel_crypt_op *kcop,
675 struct fcrypt *fcr, void __user *arg)
677 int ret;
679 ret = fill_cop_from_kcop(kcop, fcr);
680 if (unlikely(ret)) {
681 dprintk(1, KERN_ERR, "Error in fill_cop_from_kcop\n");
682 return ret;
685 if (unlikely(copy_to_user(arg, &kcop->cop, sizeof(kcop->cop)))) {
686 dprintk(1, KERN_ERR, "Cannot copy to userspace\n");
687 return -EFAULT;
689 return 0;
692 static inline void tfm_info_to_alg_info(struct alg_info *dst, struct crypto_tfm *tfm)
694 snprintf(dst->cra_name, CRYPTODEV_MAX_ALG_NAME,
695 "%s", crypto_tfm_alg_name(tfm));
696 snprintf(dst->cra_driver_name, CRYPTODEV_MAX_ALG_NAME,
697 "%s", crypto_tfm_alg_driver_name(tfm));
700 static unsigned int is_known_accelerated(struct crypto_tfm *tfm)
702 const char* name = crypto_tfm_alg_driver_name(tfm);
704 if (name == NULL)
705 return 1; /* assume accelerated */
707 if (strstr(name, "-talitos"))
708 return 1;
709 else if (strncmp(name, "mv-", 3))
710 return 1;
711 else if (strstr(name, "geode"))
712 return 1;
713 else if (strstr(name, "hifn"))
714 return 1;
715 else if (strstr(name, "-ixp4xx"))
716 return 1;
717 else if (strstr(name, "-omap"))
718 return 1;
719 else if (strstr(name, "-picoxcell"))
720 return 1;
721 else if (strstr(name, "-s5p"))
722 return 1;
723 else if (strstr(name, "-ppc4xx"))
724 return 1;
725 else if (strstr(name, "-caam"))
726 return 1;
727 else if (strstr(name, "-n2"))
728 return 1;
730 return 0;
733 static int get_session_info(struct fcrypt *fcr, struct session_info_op *siop)
735 struct csession *ses_ptr;
736 struct crypto_tfm *tfm;
738 /* this also enters ses_ptr->sem */
739 ses_ptr = crypto_get_session_by_sid(fcr, siop->ses);
740 if (unlikely(!ses_ptr)) {
741 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", siop->ses);
742 return -EINVAL;
745 siop->flags = 0;
747 if (ses_ptr->cdata.init) {
748 if (ses_ptr->cdata.aead == 0) {
749 tfm = crypto_ablkcipher_tfm(ses_ptr->cdata.async.s);
750 } else {
751 tfm = crypto_aead_tfm(ses_ptr->cdata.async.as);
753 tfm_info_to_alg_info(&siop->cipher_info, tfm);
754 #ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
755 if (tfm->__crt_alg->cra_flags & CRYPTO_ALG_KERN_DRIVER_ONLY)
756 siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
757 #else
758 if (is_known_accelerated(tfm))
759 siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
760 #endif
762 if (ses_ptr->hdata.init) {
763 tfm = crypto_ahash_tfm(ses_ptr->hdata.async.s);
764 tfm_info_to_alg_info(&siop->hash_info, tfm);
765 #ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
766 if (tfm->__crt_alg->cra_flags & CRYPTO_ALG_KERN_DRIVER_ONLY)
767 siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
768 #else
769 if (is_known_accelerated(tfm))
770 siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
771 #endif
774 siop->alignmask = ses_ptr->alignmask;
776 crypto_put_session(ses_ptr);
777 return 0;
780 static long
781 cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
783 void __user *arg = (void __user *)arg_;
784 int __user *p = arg;
785 struct session_op sop;
786 struct kernel_crypt_op kcop;
787 struct kernel_crypt_auth_op kcaop;
788 struct crypt_priv *pcr = filp->private_data;
789 struct fcrypt *fcr;
790 struct session_info_op siop;
791 uint32_t ses;
792 int ret, fd;
794 if (unlikely(!pcr))
795 BUG();
797 fcr = &pcr->fcrypt;
799 switch (cmd) {
800 case CIOCASYMFEAT:
801 return put_user(0, p);
802 case CRIOGET:
803 fd = clonefd(filp);
804 ret = put_user(fd, p);
805 if (unlikely(ret)) {
806 sys_close(fd);
807 return ret;
809 return ret;
810 case CIOCGSESSION:
811 if (unlikely(copy_from_user(&sop, arg, sizeof(sop))))
812 return -EFAULT;
814 ret = crypto_create_session(fcr, &sop);
815 if (unlikely(ret))
816 return ret;
817 ret = copy_to_user(arg, &sop, sizeof(sop));
818 if (unlikely(ret)) {
819 crypto_finish_session(fcr, sop.ses);
820 return -EFAULT;
822 return ret;
823 case CIOCFSESSION:
824 ret = get_user(ses, (uint32_t __user *)arg);
825 if (unlikely(ret))
826 return ret;
827 ret = crypto_finish_session(fcr, ses);
828 return ret;
829 case CIOCGSESSINFO:
830 if (unlikely(copy_from_user(&siop, arg, sizeof(siop))))
831 return -EFAULT;
833 ret = get_session_info(fcr, &siop);
834 if (unlikely(ret))
835 return ret;
836 return copy_to_user(arg, &siop, sizeof(siop));
837 case CIOCCRYPT:
838 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg))) {
839 dprintk(1, KERN_WARNING, "Error copying from user");
840 return ret;
843 ret = crypto_run(fcr, &kcop);
844 if (unlikely(ret)) {
845 dprintk(1, KERN_WARNING, "Error in crypto_run");
846 return ret;
849 return kcop_to_user(&kcop, fcr, arg);
850 case CIOCAUTHCRYPT:
851 if (unlikely(ret = kcaop_from_user(&kcaop, fcr, arg))) {
852 dprintk(1, KERN_WARNING, "Error copying from user");
853 return ret;
856 ret = crypto_auth_run(fcr, &kcaop);
857 if (unlikely(ret)) {
858 dprintk(1, KERN_WARNING, "Error in crypto_auth_run");
859 return ret;
861 return kcaop_to_user(&kcaop, fcr, arg);
862 case CIOCASYNCCRYPT:
863 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
864 return ret;
866 return crypto_async_run(pcr, &kcop);
867 case CIOCASYNCFETCH:
868 ret = crypto_async_fetch(pcr, &kcop);
869 if (unlikely(ret))
870 return ret;
872 return kcop_to_user(&kcop, fcr, arg);
873 default:
874 return -EINVAL;
878 /* compatibility code for 32bit userlands */
879 #ifdef CONFIG_COMPAT
881 static inline void
882 compat_to_session_op(struct compat_session_op *compat, struct session_op *sop)
884 sop->cipher = compat->cipher;
885 sop->mac = compat->mac;
886 sop->keylen = compat->keylen;
888 sop->key = compat_ptr(compat->key);
889 sop->mackeylen = compat->mackeylen;
890 sop->mackey = compat_ptr(compat->mackey);
891 sop->ses = compat->ses;
894 static inline void
895 session_op_to_compat(struct session_op *sop, struct compat_session_op *compat)
897 compat->cipher = sop->cipher;
898 compat->mac = sop->mac;
899 compat->keylen = sop->keylen;
901 compat->key = ptr_to_compat(sop->key);
902 compat->mackeylen = sop->mackeylen;
903 compat->mackey = ptr_to_compat(sop->mackey);
904 compat->ses = sop->ses;
907 static inline void
908 compat_to_crypt_op(struct compat_crypt_op *compat, struct crypt_op *cop)
910 cop->ses = compat->ses;
911 cop->op = compat->op;
912 cop->flags = compat->flags;
913 cop->len = compat->len;
915 cop->src = compat_ptr(compat->src);
916 cop->dst = compat_ptr(compat->dst);
917 cop->mac = compat_ptr(compat->mac);
918 cop->iv = compat_ptr(compat->iv);
921 static inline void
922 crypt_op_to_compat(struct crypt_op *cop, struct compat_crypt_op *compat)
924 compat->ses = cop->ses;
925 compat->op = cop->op;
926 compat->flags = cop->flags;
927 compat->len = cop->len;
929 compat->src = ptr_to_compat(cop->src);
930 compat->dst = ptr_to_compat(cop->dst);
931 compat->mac = ptr_to_compat(cop->mac);
932 compat->iv = ptr_to_compat(cop->iv);
935 static int compat_kcop_from_user(struct kernel_crypt_op *kcop,
936 struct fcrypt *fcr, void __user *arg)
938 struct compat_crypt_op compat_cop;
940 if (unlikely(copy_from_user(&compat_cop, arg, sizeof(compat_cop))))
941 return -EFAULT;
942 compat_to_crypt_op(&compat_cop, &kcop->cop);
944 return fill_kcop_from_cop(kcop, fcr);
947 static int compat_kcop_to_user(struct kernel_crypt_op *kcop,
948 struct fcrypt *fcr, void __user *arg)
950 int ret;
951 struct compat_crypt_op compat_cop;
953 ret = fill_cop_from_kcop(kcop, fcr);
954 if (unlikely(ret)) {
955 dprintk(1, KERN_WARNING, "Error in fill_cop_from_kcop");
956 return ret;
958 crypt_op_to_compat(&kcop->cop, &compat_cop);
960 if (unlikely(copy_to_user(arg, &compat_cop, sizeof(compat_cop)))) {
961 dprintk(1, KERN_WARNING, "Error copying to user");
962 return -EFAULT;
964 return 0;
967 static long
968 cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
970 void __user *arg = (void __user *)arg_;
971 struct crypt_priv *pcr = file->private_data;
972 struct fcrypt *fcr;
973 struct session_op sop;
974 struct compat_session_op compat_sop;
975 struct kernel_crypt_op kcop;
976 int ret;
978 if (unlikely(!pcr))
979 BUG();
981 fcr = &pcr->fcrypt;
983 switch (cmd) {
984 case CIOCASYMFEAT:
985 case CRIOGET:
986 case CIOCFSESSION:
987 case CIOCGSESSINFO:
988 return cryptodev_ioctl(file, cmd, arg_);
990 case COMPAT_CIOCGSESSION:
991 if (unlikely(copy_from_user(&compat_sop, arg,
992 sizeof(compat_sop))))
993 return -EFAULT;
994 compat_to_session_op(&compat_sop, &sop);
996 ret = crypto_create_session(fcr, &sop);
997 if (unlikely(ret))
998 return ret;
1000 session_op_to_compat(&sop, &compat_sop);
1001 ret = copy_to_user(arg, &compat_sop, sizeof(compat_sop));
1002 if (unlikely(ret)) {
1003 crypto_finish_session(fcr, sop.ses);
1004 return -EFAULT;
1006 return ret;
1008 case COMPAT_CIOCCRYPT:
1009 ret = compat_kcop_from_user(&kcop, fcr, arg);
1010 if (unlikely(ret))
1011 return ret;
1013 ret = crypto_run(fcr, &kcop);
1014 if (unlikely(ret))
1015 return ret;
1017 return compat_kcop_to_user(&kcop, fcr, arg);
1018 case COMPAT_CIOCASYNCCRYPT:
1019 if (unlikely(ret = compat_kcop_from_user(&kcop, fcr, arg)))
1020 return ret;
1022 return crypto_async_run(pcr, &kcop);
1023 case COMPAT_CIOCASYNCFETCH:
1024 ret = crypto_async_fetch(pcr, &kcop);
1025 if (unlikely(ret))
1026 return ret;
1028 return compat_kcop_to_user(&kcop, fcr, arg);
1030 default:
1031 return -EINVAL;
1035 #endif /* CONFIG_COMPAT */
1037 static unsigned int cryptodev_poll(struct file *file, poll_table *wait)
1039 struct crypt_priv *pcr = file->private_data;
1040 int ret = 0;
1042 poll_wait(file, &pcr->user_waiter, wait);
1044 if (!list_empty_careful(&pcr->done.list))
1045 ret |= POLLIN | POLLRDNORM;
1046 if (!list_empty_careful(&pcr->free.list) || pcr->itemcount < MAX_COP_RINGSIZE)
1047 ret |= POLLOUT | POLLWRNORM;
1049 return ret;
1052 static const struct file_operations cryptodev_fops = {
1053 .owner = THIS_MODULE,
1054 .open = cryptodev_open,
1055 .release = cryptodev_release,
1056 .unlocked_ioctl = cryptodev_ioctl,
1057 #ifdef CONFIG_COMPAT
1058 .compat_ioctl = cryptodev_compat_ioctl,
1059 #endif /* CONFIG_COMPAT */
1060 .poll = cryptodev_poll,
1063 static struct miscdevice cryptodev = {
1064 .minor = MISC_DYNAMIC_MINOR,
1065 .name = "crypto",
1066 .fops = &cryptodev_fops,
1067 .mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH,
1070 static int __init
1071 cryptodev_register(void)
1073 int rc;
1075 rc = misc_register(&cryptodev);
1076 if (unlikely(rc)) {
1077 printk(KERN_ERR PFX "registration of /dev/crypto failed\n");
1078 return rc;
1081 return 0;
1084 static void __exit
1085 cryptodev_deregister(void)
1087 misc_deregister(&cryptodev);
1090 /* ====== Module init/exit ====== */
1091 static int __init init_cryptodev(void)
1093 int rc;
1095 cryptodev_wq = create_workqueue("cryptodev_queue");
1096 if (unlikely(!cryptodev_wq)) {
1097 printk(KERN_ERR PFX "failed to allocate the cryptodev workqueue\n");
1098 return -EFAULT;
1101 rc = cryptodev_register();
1102 if (unlikely(rc)) {
1103 destroy_workqueue(cryptodev_wq);
1104 return rc;
1107 printk(KERN_INFO PFX "driver %s loaded.\n", VERSION);
1109 return 0;
1112 static void __exit exit_cryptodev(void)
1114 flush_workqueue(cryptodev_wq);
1115 destroy_workqueue(cryptodev_wq);
1117 cryptodev_deregister();
1118 printk(KERN_INFO PFX "driver unloaded.\n");
1121 module_init(init_cryptodev);
1122 module_exit(exit_cryptodev);