TLS mode flag is being ignored for AEAD ciphers.
[cryptodev-linux.git] / authenc.c
/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/*
 * This file handles the AEAD part of /dev/crypto.
 */

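/*
 * Userspace view (illustrative sketch only, not a normative example;
 * the exact struct crypt_auth_op fields are as declared in
 * <crypto/cryptodev.h>): requests reach this file through the
 * CIOCAUTHCRYPT ioctl. Session setup via CIOCGSESSION is elided.
 *
 *	struct crypt_auth_op cao = {0};
 *
 *	cao.ses      = sess.ses;      // from a prior CIOCGSESSION
 *	cao.op       = COP_ENCRYPT;
 *	cao.flags    = COP_FLAG_AEAD_TLS_TYPE; // or _SRTP_TYPE, or 0 for plain AEAD (e.g. GCM)
 *	cao.src      = buf;
 *	cao.dst      = buf;           // TLS/SRTP modes expect in-place operation
 *	cao.len      = data_len;
 *	cao.auth_src = aad;
 *	cao.auth_len = aad_len;
 *	cao.iv       = iv;
 *	cao.iv_len   = iv_len;
 *	cao.tag_len  = 0;             // 0 selects the session's digest/tag size
 *
 *	if (ioctl(cfd, CIOCAUTHCRYPT, &cao) == -1)
 *		perror("CIOCAUTHCRYPT");
 */
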
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <crypto/cryptodev.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include "cryptodev_int.h"
#include "zc.h"
#include "cryptlib.h"
#include "version.h"

/* Make caop->dst available as a scatterlist.
 * (caop->src is assumed to be equal to caop->dst.)
 */
static int get_userbuf_tls(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
                           struct scatterlist **dst_sg,
                           int *tot_pages)
{
        int pagecount = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        int rc;

        if (caop->dst == NULL)
                return -EINVAL;

        if (ses->alignmask) {
                if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
                        dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
                                __func__, (unsigned long)caop->dst, ses->alignmask + 1);
        }

        if (kcaop->dst_len == 0) {
                dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
                return -EINVAL;
        }

        pagecount = PAGECOUNT(caop->dst, kcaop->dst_len);

        (*tot_pages) = pagecount;

        rc = adjust_sg_array(ses, pagecount);
        if (rc)
                return rc;

        rc = __get_userbuf(caop->dst, kcaop->dst_len, 1, pagecount,
                           ses->pages, ses->sg, kcaop->task, kcaop->mm);
        if (unlikely(rc)) {
                dprintk(1, KERN_ERR,
                        "failed to get user pages for data input\n");
                return -EINVAL;
        }

        (*dst_sg) = ses->sg;

        return 0;
}

/* Taken from Maxim Levitsky's patch.
 */
static struct scatterlist *sg_advance(struct scatterlist *sg, int consumed)
{
        while (consumed >= sg->length) {
                consumed -= sg->length;

                sg = sg_next(sg);
                if (!sg)
                        break;
        }

        WARN_ON(!sg && consumed);

        if (!sg)
                return NULL;

        sg->offset += consumed;
        sg->length -= consumed;

        if (sg->offset >= PAGE_SIZE) {
                struct page *page =
                        nth_page(sg_page(sg), sg->offset / PAGE_SIZE);
                sg_set_page(sg, page, sg->length, sg->offset % PAGE_SIZE);
        }

        return sg;
}

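/*
 * Example with hypothetical sizes: given a list of two 4096-byte sg
 * entries, sg_advance(sg, 4100) skips the first entry, trims 4 bytes
 * off the second (offset += 4, length -= 4) and returns it; advancing
 * past the end of the list returns NULL.
 */
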
/*
 * sg_copy - copies sg entries from sg_from to sg_to, so that sg_to
 * covers the first 'len' bytes of sg_from.
 */
static int sg_copy(struct scatterlist *sg_from, struct scatterlist *sg_to, int len)
{
        while (len > sg_from->length) {
                len -= sg_from->length;

                sg_set_page(sg_to, sg_page(sg_from),
                            sg_from->length, sg_from->offset);

                sg_to = sg_next(sg_to);
                sg_from = sg_next(sg_from);

                if (len && (!sg_from || !sg_to))
                        return -ENOMEM;
        }

        if (len)
                sg_set_page(sg_to, sg_page(sg_from),
                            len, sg_from->offset);
        sg_mark_end(sg_to);

        return 0;
}

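/*
 * Example with hypothetical sizes: copying len = 6000 from a list of
 * 4096-byte entries emits the whole first entry plus a 1904-byte head
 * of the second into sg_to, then terminates sg_to with sg_mark_end().
 */
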
#define MAX_SRTP_AUTH_DATA_DIFF 256

/* Makes caop->auth_src available as a scatterlist.
 * It also provides a pointer to caop->dst, which, however, is assumed
 * to lie within the caop->auth_src buffer. If not (i.e. if their
 * difference exceeds MAX_SRTP_AUTH_DATA_DIFF) it returns an error.
 */
static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
                            struct scatterlist **auth_sg, struct scatterlist **dst_sg,
                            int *tot_pages)
{
        int pagecount, diff;
        int auth_pagecount = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        int rc;

        if (caop->dst == NULL && caop->auth_src == NULL) {
                dprintk(1, KERN_ERR, "dst and auth_src cannot both be null\n");
                return -EINVAL;
        }

        if (ses->alignmask) {
                if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
                        dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
                                __func__, (unsigned long)caop->dst, ses->alignmask + 1);
                if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask))
                        dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
                                __func__, (unsigned long)caop->auth_src, ses->alignmask + 1);
        }

        if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
                dprintk(1, KERN_WARNING, "Destination and auth lengths cannot be zero\n");
                return -EINVAL;
        }

        /* Note that in SRTP the auth data overlap with the data to be
         * encrypted (dst).
         */
        auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
        diff = (int)(caop->src - caop->auth_src);
        if (diff > MAX_SRTP_AUTH_DATA_DIFF || diff < 0) {
                dprintk(1, KERN_WARNING, "auth_src must overlap with src (diff: %d).\n", diff);
                return -EINVAL;
        }

        (*tot_pages) = pagecount = auth_pagecount;

        rc = adjust_sg_array(ses, pagecount*2); /* double pages to have pages for dst(=auth_src) */
        if (rc) {
                dprintk(1, KERN_ERR, "cannot adjust sg array\n");
                return rc;
        }

        rc = __get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
                           ses->pages, ses->sg, kcaop->task, kcaop->mm);
        if (unlikely(rc)) {
                dprintk(1, KERN_ERR,
                        "failed to get user pages for data input\n");
                return -EINVAL;
        }

        (*auth_sg) = ses->sg;

        (*dst_sg) = ses->sg + auth_pagecount;
        sg_init_table(*dst_sg, auth_pagecount);
        sg_copy(ses->sg, (*dst_sg), caop->auth_len);
        (*dst_sg) = sg_advance(*dst_sg, diff);
        if (*dst_sg == NULL) {
                release_user_pages(ses->pages, pagecount);
                dprintk(1, KERN_ERR,
                        "failed to get enough pages for auth data\n");
                return -EINVAL;
        }

        return 0;
}

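/*
 * Resulting layout (sketch): dst_sg maps the same pages as auth_sg,
 * advanced by 'diff' bytes, so the data to encrypt sit inside the
 * authenticated region:
 *
 *	auth_src                              auth_src + auth_len
 *	|<------------------ auth_sg ------------------>|
 *	        |<-------------- dst_sg --------------->|
 *	        ^
 *	        src = auth_src + diff  (0 <= diff <= MAX_SRTP_AUTH_DATA_DIFF)
 */
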
/* XXX: inefficient. We could use get_userbuf() instead, but don't
 * bother for now.
 */
int copy_from_user_to_user(void __user *dst, void __user *src, int len)
{
        uint8_t *buffer;
        int buffer_size = min(len, 16*1024);
        int rc;

        if (len > buffer_size) {
                dprintk(1, KERN_ERR,
                        "The provided buffer is too large\n");
                return -EINVAL;
        }

        buffer = kmalloc(buffer_size, GFP_KERNEL);
        if (buffer == NULL)
                return -ENOMEM;

        if (unlikely(copy_from_user(buffer, src, len))) {
                rc = -EFAULT;
                goto out;
        }

        if (unlikely(copy_to_user(dst, buffer, len))) {
                rc = -EFAULT;
                goto out;
        }

        rc = 0;
out:
        kfree(buffer);
        return rc;
}

static int fill_kcaop_from_caop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
        struct crypt_auth_op *caop = &kcaop->caop;
        struct csession *ses_ptr;
        int rc;

        /* this also enters ses_ptr->sem */
        ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
        if (unlikely(!ses_ptr)) {
                dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
                return -EINVAL;
        }

        if (caop->flags & COP_FLAG_AEAD_TLS_TYPE || caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
                if (caop->src != caop->dst) {
                        dprintk(2, KERN_ERR,
                                "Non-inplace encryption and decryption is not efficient\n");

                        rc = copy_from_user_to_user(caop->dst, caop->src, caop->len);
                        if (rc < 0)
                                goto out_unlock;
                }
        }

        if (caop->tag_len == 0)
                caop->tag_len = ses_ptr->hdata.digestsize;

        kcaop->ivlen = caop->iv ? ses_ptr->cdata.ivsize : 0;

        if (caop->flags & COP_FLAG_AEAD_TLS_TYPE)
                kcaop->dst_len = caop->len + ses_ptr->cdata.blocksize /* pad */ + caop->tag_len;
        else
                kcaop->dst_len = caop->len;

        kcaop->task = current;
        kcaop->mm = current->mm;

        if (caop->iv) {
                rc = copy_from_user(kcaop->iv, caop->iv, kcaop->ivlen);
                if (unlikely(rc)) {
                        dprintk(1, KERN_ERR,
                                "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
                                kcaop->ivlen, rc, (unsigned long)caop->iv);
                        rc = -EFAULT;
                        goto out_unlock;
                }
        }

        rc = 0;

out_unlock:
        crypto_put_session(ses_ptr);
        return rc;
}

static int fill_caop_from_kcaop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
        int ret;

        kcaop->caop.len = kcaop->dst_len;

        if (kcaop->ivlen && kcaop->caop.flags & COP_FLAG_WRITE_IV) {
                ret = copy_to_user(kcaop->caop.iv,
                                   kcaop->iv, kcaop->ivlen);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR, "Error in copying to userspace\n");
                        return -EFAULT;
                }
        }

        return 0;
}

int kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
                    struct fcrypt *fcr, void __user *arg)
{
        if (unlikely(copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop)))) {
                dprintk(1, KERN_ERR, "Error in copying from userspace\n");
                return -EFAULT;
        }

        return fill_kcaop_from_caop(kcaop, fcr);
}

int kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
                  struct fcrypt *fcr, void __user *arg)
{
        int ret;

        ret = fill_caop_from_kcaop(kcaop, fcr);
        if (unlikely(ret)) {
                dprintk(1, KERN_ERR, "fill_caop_from_kcaop() failed\n");
                return ret;
        }

        if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
                dprintk(1, KERN_ERR, "Error in copying to userspace\n");
                return -EFAULT;
        }

        return 0;
}

static void copy_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
        scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}

static void read_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
        scatterwalk_map_and_copy(hash, dst_sg, len - hash_len, hash_len, 0);
}

static int pad_record(struct scatterlist *dst_sg, int len, int block_size)
{
        uint8_t pad[block_size];
        int pad_size = block_size - (len % block_size);

        memset(pad, pad_size - 1, pad_size);

        scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);

        return pad_size;
}

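/*
 * Worked example: with block_size = 16 and len = 20, pad_size is
 * 16 - (20 % 16) = 12, so twelve bytes of value 11 are appended; the
 * final byte doubles as the TLS pad-length byte, counting the eleven
 * pad bytes that precede it.
 */
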
static int verify_tls_record_pad(struct scatterlist *dst_sg, int len, int block_size)
{
        uint8_t pad[256]; /* the maximum allowed */
        uint8_t pad_size;
        int i;

        scatterwalk_map_and_copy(&pad_size, dst_sg, len - 1, 1, 0);

        if (pad_size + 1 > len) {
                dprintk(1, KERN_ERR, "Pad size: %d\n", pad_size);
                return -ECANCELED;
        }

        scatterwalk_map_and_copy(pad, dst_sg, len - pad_size - 1, pad_size + 1, 0);

        for (i = 0; i < pad_size; i++)
                if (pad[i] != pad_size) {
                        dprintk(1, KERN_ERR, "Pad size: %d, pad: %d\n", pad_size, (int)pad[i]);
                        return -ECANCELED;
                }

        return pad_size + 1;
}

/* Authenticate and encrypt the TLS way (also perform padding).
 * During decryption it verifies the pad and tag and returns -ECANCELED on error.
 */
static int
tls_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
                 struct scatterlist *auth_sg, uint32_t auth_len,
                 struct scatterlist *dst_sg, uint32_t len)
{
        int ret, fail = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        uint8_t vhash[AALG_MAX_RESULT_LEN];
        uint8_t hash_output[AALG_MAX_RESULT_LEN];

        /* TLS authenticates the plaintext except for the padding.
         */
        if (caop->op == COP_ENCRYPT) {
                if (ses_ptr->hdata.init != 0) {
                        if (auth_len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            auth_sg, auth_len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        if (len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            dst_sg, len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        copy_tls_hash(dst_sg, len, hash_output, caop->tag_len);
                        len += caop->tag_len;
                }

                if (ses_ptr->cdata.init != 0) {
                        if (ses_ptr->cdata.blocksize > 1) {
                                ret = pad_record(dst_sg, len, ses_ptr->cdata.blocksize);
                                len += ret;
                        }

                        ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
                                                       dst_sg, dst_sg, len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
                                return ret;
                        }
                }
        } else {
                if (ses_ptr->cdata.init != 0) {
                        ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
                                                       dst_sg, dst_sg, len);

                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
                                return ret;
                        }

                        if (ses_ptr->cdata.blocksize > 1) {
                                ret = verify_tls_record_pad(dst_sg, len, ses_ptr->cdata.blocksize);
                                if (unlikely(ret < 0)) {
                                        dprintk(2, KERN_ERR, "verify_record_pad: %d\n", ret);
                                        fail = 1;
                                } else {
                                        len -= ret;
                                }
                        }
                }

                if (ses_ptr->hdata.init != 0) {
                        if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
                                dprintk(1, KERN_ERR, "Illegal tag len size\n");
                                return -EINVAL;
                        }

                        read_tls_hash(dst_sg, len, vhash, caop->tag_len);
                        len -= caop->tag_len;

                        if (auth_len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            auth_sg, auth_len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        if (len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            dst_sg, len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
                                dprintk(2, KERN_ERR, "MAC verification failed (tag_len: %d)\n", caop->tag_len);
                                return -ECANCELED;
                        }
                }
        }

        kcaop->dst_len = len;
        return 0;
}

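/*
 * For reference, the in-place buffer handled above follows the TLS CBC
 * record layout; encryption appends the MAC and padding before
 * enciphering, decryption strips them after deciphering:
 *
 *	|<------- caop->len ------>|
 *	+--------------------------+-----------+---------+
 *	| plaintext                | MAC (tag) | padding |
 *	+--------------------------+-----------+---------+
 *	|<--------------- kcaop->dst_len --------------->|   (on encrypt)
 */
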
/* Authenticate and encrypt the SRTP way. During decryption
 * it verifies the tag and returns -ECANCELED on error.
 */
static int
srtp_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
                  struct scatterlist *auth_sg, uint32_t auth_len,
                  struct scatterlist *dst_sg, uint32_t len)
{
        int ret, fail = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        uint8_t vhash[AALG_MAX_RESULT_LEN];
        uint8_t hash_output[AALG_MAX_RESULT_LEN];

        /* SRTP authenticates the encrypted data.
         */
        if (caop->op == COP_ENCRYPT) {
                if (ses_ptr->cdata.init != 0) {
                        if (ses_ptr->cdata.stream == 0) {
                                dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode\n");
                                return -EINVAL;
                        }

                        ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
                                                       dst_sg, dst_sg, len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
                                return ret;
                        }
                }

                if (ses_ptr->hdata.init != 0) {
                        if (auth_len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            auth_sg, auth_len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        if (unlikely(copy_to_user(caop->tag, hash_output, caop->tag_len)))
                                return -EFAULT;
                }
        } else {
                if (ses_ptr->hdata.init != 0) {
                        if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
                                dprintk(1, KERN_ERR, "Illegal tag len size\n");
                                return -EINVAL;
                        }

                        if (unlikely(copy_from_user(vhash, caop->tag, caop->tag_len)))
                                return -EFAULT;

                        ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                    auth_sg, auth_len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                return ret;
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
                                dprintk(2, KERN_ERR, "MAC verification failed\n");
                                return -ECANCELED;
                        }
                }

                if (ses_ptr->cdata.init != 0) {
                        if (ses_ptr->cdata.stream == 0) {
                                dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode\n");
                                return -EINVAL;
                        }

                        ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
                                                       dst_sg, dst_sg, len);

                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
                                return ret;
                        }
                }
        }

        kcaop->dst_len = len;
        return 0;
}

/* Typical AEAD (i.e. GCM) encryption/decryption.
 * During decryption the tag is verified.
 */
static int
auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
             struct scatterlist *auth_sg, uint32_t auth_len,
             struct scatterlist *src_sg,
             struct scatterlist *dst_sg, uint32_t len)
{
        int ret;
        struct crypt_auth_op *caop = &kcaop->caop;
        int max_tag_len;

        if (unlikely(ses_ptr->cdata.init == 0))
                return -EINVAL;

        if (unlikely(ses_ptr->cdata.stream == 0 || ses_ptr->cdata.aead == 0)) {
                dprintk(0, KERN_ERR, "Only stream and AEAD ciphers are allowed for authenc\n");
                return -EINVAL;
        }

        max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
        if (unlikely(caop->tag_len > max_tag_len)) {
                dprintk(0, KERN_ERR, "Illegal tag length: %d\n", caop->tag_len);
                return -EINVAL;
        }

        if (caop->tag_len)
                cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);
        else
                caop->tag_len = max_tag_len;

        if (caop->op == COP_ENCRYPT) {
                if (auth_len > 0)
                        cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);
                else /* for some reason we _have_ to call that */
                        cryptodev_cipher_auth(&ses_ptr->cdata, NULL, 0);

                ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
                                               src_sg, dst_sg, len);
                if (unlikely(ret)) {
                        dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
                        return ret;
                }
                kcaop->dst_len = len + caop->tag_len;
                caop->tag = caop->dst + len;
        } else {
                if (auth_len > 0)
                        cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);

                ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
                                               src_sg, dst_sg, len);

                if (unlikely(ret)) {
                        dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
                        return ret;
                }
                kcaop->dst_len = len - caop->tag_len;
                caop->tag = caop->dst + len - caop->tag_len;
        }

        return 0;
}

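/*
 * Output layout for the plain AEAD case (e.g. GCM): the tag travels at
 * the tail of the destination buffer, which is why caop->tag is pointed
 * into caop->dst above instead of being copied out separately:
 *
 *	encrypt: dst = ciphertext || tag,  dst_len = len + tag_len
 *	decrypt: dst = plaintext,          dst_len = len - tag_len
 */
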
/* This is the main crypto function - zero-copy edition */
static int
__crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
        struct scatterlist *dst_sg, *auth_sg, *src_sg;
        struct crypt_auth_op *caop = &kcaop->caop;
        int ret = 0, pagecount = 0;

        if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
                ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg, &pagecount);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR, "get_userbuf_srtp(): Error getting user pages.\n");
                        return ret;
                }

                ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
                                        dst_sg, caop->len);
        } else { /* TLS and normal cases. Here auth data are usually small,
                  * so we just copy them to a free page instead of trying
                  * to map them.
                  */
                unsigned char *auth_buf = NULL;
                struct scatterlist tmp;

                if (unlikely(caop->auth_len > PAGE_SIZE))
                        return -EINVAL;

                auth_buf = (char *)__get_free_page(GFP_KERNEL);
                if (unlikely(!auth_buf))
                        return -ENOMEM;

                if (caop->auth_len > 0) {
                        if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
                                ret = -EFAULT;
                                goto fail;
                        }

                        sg_init_one(&tmp, auth_buf, caop->auth_len);
                        auth_sg = &tmp;
                } else {
                        auth_sg = NULL;
                }

                if (caop->flags & COP_FLAG_AEAD_TLS_TYPE && ses_ptr->cdata.aead == 0) {
                        ret = get_userbuf_tls(ses_ptr, kcaop, &dst_sg, &pagecount);
                        if (unlikely(ret)) {
                                dprintk(1, KERN_ERR, "get_userbuf_tls(): Error getting user pages.\n");
                                goto fail;
                        }

                        ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
                                               dst_sg, caop->len);
                } else {
                        int dst_len;

                        if (caop->op == COP_ENCRYPT)
                                dst_len = caop->len + cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
                        else
                                dst_len = caop->len - cryptodev_cipher_get_tag_size(&ses_ptr->cdata);

                        ret = get_userbuf(ses_ptr, caop->src, caop->len, caop->dst, dst_len,
                                          kcaop->task, kcaop->mm, &src_sg, &dst_sg, &pagecount);
                        if (unlikely(ret)) {
                                dprintk(1, KERN_ERR, "get_userbuf(): Error getting user pages.\n");
                                goto fail;
                        }

                        ret = auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
                                           src_sg, dst_sg, caop->len);
                }

fail:
                free_page((unsigned long)auth_buf);
        }

        release_user_pages(ses_ptr->pages, pagecount);
        return ret;
}

int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop)
{
        struct csession *ses_ptr;
        struct crypt_auth_op *caop = &kcaop->caop;
        int ret;

        if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
                dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", caop->op);
                return -EINVAL;
        }

        /* this also enters ses_ptr->sem */
        ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
        if (unlikely(!ses_ptr)) {
                dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
                return -EINVAL;
        }

        if (unlikely(ses_ptr->cdata.init == 0)) {
                dprintk(1, KERN_ERR, "cipher context not initialized\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        /* If we have a hash/mac handle reset its state */
        if (ses_ptr->hdata.init != 0) {
                ret = cryptodev_hash_reset(&ses_ptr->hdata);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR,
                                "error in cryptodev_hash_reset()\n");
                        goto out_unlock;
                }
        }

        cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
                                min(ses_ptr->cdata.ivsize, kcaop->ivlen));

        if (likely(caop->len || caop->auth_len)) {
                ret = __crypto_auth_run_zc(ses_ptr, kcaop);
                if (unlikely(ret))
                        goto out_unlock;
        } else {
                ret = -EINVAL;
                goto out_unlock;
        }

        ret = 0;

        cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
                                min(ses_ptr->cdata.ivsize, kcaop->ivlen));

out_unlock:
        crypto_put_session(ses_ptr);
        return ret;
}