Added example of AES-GCM usage.
[cryptodev-linux.git] / authenc.c
blob2ad0daad99638546f2ad58eb0d8b12f7b2f666d4
1 /*
2 * Driver for /dev/crypto device (aka CryptoDev)
4 * Copyright (c) 2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
6 * This file is part of linux cryptodev.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
25 * This file handles the AEAD part of /dev/crypto.
29 #include <crypto/hash.h>
30 #include <linux/crypto.h>
31 #include <linux/mm.h>
32 #include <linux/highmem.h>
33 #include <linux/ioctl.h>
34 #include <linux/random.h>
35 #include <linux/syscalls.h>
36 #include <linux/pagemap.h>
37 #include <linux/poll.h>
38 #include <linux/uaccess.h>
39 #include <crypto/cryptodev.h>
40 #include <crypto/scatterwalk.h>
41 #include <linux/scatterlist.h>
42 #include "cryptodev_int.h"
43 #include "zc.h"
44 #include "cryptlib.h"
45 #include "version.h"
48 /* make caop->dst available in scatterlist.
49 * (caop->src is assumed to be equal to caop->dst)
51 static int get_userbuf_tls(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
52 struct scatterlist **dst_sg,
53 int *tot_pages)
55 int pagecount = 0;
56 struct crypt_auth_op *caop = &kcaop->caop;
57 int rc;
59 if (caop->dst == NULL)
60 return -EINVAL;
62 if (ses->alignmask) {
63 if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
64 dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
65 __func__, (unsigned long)caop->dst, ses->alignmask + 1);
68 if (kcaop->dst_len == 0) {
69 dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
70 return -EINVAL;
73 pagecount = PAGECOUNT(caop->dst, kcaop->dst_len);
75 (*tot_pages) = pagecount;
77 rc = adjust_sg_array(ses, pagecount);
78 if (rc)
79 return rc;
81 rc = __get_userbuf(caop->dst, kcaop->dst_len, 1, pagecount,
82 ses->pages, ses->sg, kcaop->task, kcaop->mm);
83 if (unlikely(rc)) {
84 dprintk(1, KERN_ERR,
85 "failed to get user pages for data input\n");
86 return -EINVAL;
89 (*dst_sg) = ses->sg;
91 return 0;
94 /* Taken from Maxim Levitsky's patch
96 static struct scatterlist *sg_advance(struct scatterlist *sg, int consumed)
98 while (consumed >= sg->length) {
99 consumed -= sg->length;
101 sg = sg_next(sg);
102 if (!sg)
103 break;
106 WARN_ON(!sg && consumed);
108 if (!sg)
109 return NULL;
111 sg->offset += consumed;
112 sg->length -= consumed;
114 if (sg->offset >= PAGE_SIZE) {
115 struct page *page =
116 nth_page(sg_page(sg), sg->offset / PAGE_SIZE);
117 sg_set_page(sg, page, sg->length, sg->offset % PAGE_SIZE);
120 return sg;
124 * sg_copy - copies sg entries from sg_from to sg_to, such
125 * as sg_to covers first 'len' bytes from sg_from.
127 static int sg_copy(struct scatterlist *sg_from, struct scatterlist *sg_to, int len)
129 while (len > sg_from->length) {
130 len -= sg_from->length;
132 sg_set_page(sg_to, sg_page(sg_from),
133 sg_from->length, sg_from->offset);
135 sg_to = sg_next(sg_to);
136 sg_from = sg_next(sg_from);
138 if (len && (!sg_from || !sg_to))
139 return -ENOMEM;
142 if (len)
143 sg_set_page(sg_to, sg_page(sg_from),
144 len, sg_from->offset);
145 sg_mark_end(sg_to);
146 return 0;
149 #define MAX_SRTP_AUTH_DATA_DIFF 256
151 /* Makes caop->auth_src available as scatterlist.
152 * It also provides a pointer to caop->dst, which however,
153 * is assumed to be within the caop->auth_src buffer. If not
154 * (if their difference exceeds MAX_SRTP_AUTH_DATA_DIFF) it
155 * returns error.
157 static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
158 struct scatterlist **auth_sg, struct scatterlist **dst_sg,
159 int *tot_pages)
161 int pagecount, diff;
162 int auth_pagecount = 0;
163 struct crypt_auth_op *caop = &kcaop->caop;
164 int rc;
166 if (caop->dst == NULL && caop->auth_src == NULL) {
167 dprintk(1, KERN_ERR, "dst and auth_src cannot be both null\n");
168 return -EINVAL;
171 if (ses->alignmask) {
172 if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
173 dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
174 __func__, (unsigned long)caop->dst, ses->alignmask + 1);
175 if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask))
176 dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
177 __func__, (unsigned long)caop->auth_src, ses->alignmask + 1);
180 if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
181 dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
182 return -EINVAL;
185 /* Note that in SRTP auth data overlap with data to be encrypted (dst)
188 auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
189 diff = (int)(caop->src - caop->auth_src);
190 if (diff > MAX_SRTP_AUTH_DATA_DIFF || diff < 0) {
191 dprintk(1, KERN_WARNING, "auth_src must overlap with src (diff: %d).\n", diff);
192 return -EINVAL;
195 (*tot_pages) = pagecount = auth_pagecount;
197 rc = adjust_sg_array(ses, pagecount*2); /* double pages to have pages for dst(=auth_src) */
198 if (rc) {
199 dprintk(1, KERN_ERR, "cannot adjust sg array\n");
200 return rc;
203 rc = __get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
204 ses->pages, ses->sg, kcaop->task, kcaop->mm);
205 if (unlikely(rc)) {
206 dprintk(1, KERN_ERR,
207 "failed to get user pages for data input\n");
208 return -EINVAL;
210 (*auth_sg) = ses->sg;
212 (*dst_sg) = ses->sg + auth_pagecount;
213 sg_init_table(*dst_sg, auth_pagecount);
214 sg_copy(ses->sg, (*dst_sg), caop->auth_len);
215 (*dst_sg) = sg_advance(*dst_sg, diff);
216 if (*dst_sg == NULL) {
217 release_user_pages(ses->pages, pagecount);
218 dprintk(1, KERN_ERR,
219 "failed to get enough pages for auth data\n");
220 return -EINVAL;
223 return 0;
226 static int fill_kcaop_from_caop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
228 struct crypt_auth_op *caop = &kcaop->caop;
229 struct csession *ses_ptr;
230 int ret;
232 /* this also enters ses_ptr->sem */
233 ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
234 if (unlikely(!ses_ptr)) {
235 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
236 return -EINVAL;
239 if (caop->flags & COP_FLAG_AEAD_TLS_TYPE || caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
240 if (caop->src != caop->dst) {
241 dprintk(1, KERN_ERR,
242 "Non-inplace encryption and decryption is not efficient and not implemented\n");
243 ret = -EINVAL;
244 goto out_unlock;
248 if (caop->tag_len == 0)
249 caop->tag_len = ses_ptr->hdata.digestsize;
251 kcaop->ivlen = caop->iv ? ses_ptr->cdata.ivsize : 0;
253 if (caop->flags & COP_FLAG_AEAD_TLS_TYPE)
254 kcaop->dst_len = caop->len + ses_ptr->cdata.blocksize /* pad */ + caop->tag_len;
255 else
256 kcaop->dst_len = caop->len;
258 kcaop->task = current;
259 kcaop->mm = current->mm;
261 if (caop->iv) {
262 ret = copy_from_user(kcaop->iv, caop->iv, kcaop->ivlen);
263 if (unlikely(ret)) {
264 dprintk(1, KERN_ERR,
265 "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
266 kcaop->ivlen, ret, (unsigned long)caop->iv);
267 ret = -EFAULT;
268 goto out_unlock;
272 ret = 0;
274 out_unlock:
275 crypto_put_session(ses_ptr);
276 return ret;
280 static int fill_caop_from_kcaop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
282 int ret;
284 kcaop->caop.len = kcaop->dst_len;
286 if (kcaop->ivlen && kcaop->caop.flags & COP_FLAG_WRITE_IV) {
287 ret = copy_to_user(kcaop->caop.iv,
288 kcaop->iv, kcaop->ivlen);
289 if (unlikely(ret)) {
290 dprintk(1, KERN_ERR, "Error in copying to userspace\n");
291 return -EFAULT;
294 return 0;
298 int kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
299 struct fcrypt *fcr, void __user *arg)
301 if (unlikely(copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop)))) {
302 dprintk(1, KERN_ERR, "Error in copying from userspace\n");
303 return -EFAULT;
306 return fill_kcaop_from_caop(kcaop, fcr);
309 int kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
310 struct fcrypt *fcr, void __user *arg)
312 int ret;
314 ret = fill_caop_from_kcaop(kcaop, fcr);
315 if (unlikely(ret)) {
316 dprintk(1, KERN_ERR, "fill_caop_from_kcaop\n");
317 return ret;
320 if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
321 dprintk(1, KERN_ERR, "Error in copying to userspace\n");
322 return -EFAULT;
324 return 0;
/* Append 'hash_len' MAC bytes after the first 'len' bytes of dst_sg (write). */
static void copy_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}
/* Read the trailing 'hash_len' MAC bytes out of the last part of dst_sg. */
static void read_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len - hash_len, hash_len, 0);
}
/* Append TLS CBC padding after the first 'len' bytes in dst_sg.
 * Pads up to the next multiple of block_size; every pad byte carries
 * pad_size-1, as TLS requires. Returns the number of bytes appended.
 */
static int pad_record(struct scatterlist *dst_sg, int len, int block_size)
{
	/* Fixed buffer instead of a VLA: the kernel is built with -Wvla and
	 * forbids variable-length arrays on the stack. 256 covers the TLS
	 * maximum pad and matches verify_tls_record_pad()'s buffer. */
	uint8_t pad[256];
	int pad_size = block_size - (len % block_size);

	memset(pad, pad_size - 1, pad_size);
	scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);

	return pad_size;
}
349 static int verify_tls_record_pad( struct scatterlist *dst_sg, int len, int block_size)
351 uint8_t pad[256]; /* the maximum allowed */
352 uint8_t pad_size;
353 int i;
355 scatterwalk_map_and_copy(&pad_size, dst_sg, len-1, 1, 0);
357 if (pad_size+1 > len) {
358 dprintk(1, KERN_ERR, "Pad size: %d\n", pad_size);
359 return -ECANCELED;
362 scatterwalk_map_and_copy(pad, dst_sg, len-pad_size-1, pad_size+1, 0);
364 for (i=0;i<pad_size;i++)
365 if (pad[i] != pad_size) {
366 dprintk(1, KERN_ERR, "Pad size: %d, pad: %d\n", pad_size, (int)pad[i]);
367 return -ECANCELED;
370 return pad_size+1;
373 /* Authenticate and encrypt the TLS way (also perform padding).
374 * During decryption it verifies the pad and tag and returns -ECANCELED on error.
376 static int
377 tls_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
378 struct scatterlist *auth_sg, uint32_t auth_len,
379 struct scatterlist *dst_sg, uint32_t len)
381 int ret, fail = 0;
382 struct crypt_auth_op *caop = &kcaop->caop;
383 uint8_t vhash[AALG_MAX_RESULT_LEN];
384 uint8_t hash_output[AALG_MAX_RESULT_LEN];
386 /* TLS authenticates the plaintext except for the padding.
388 if (caop->op == COP_ENCRYPT) {
389 if (ses_ptr->hdata.init != 0) {
390 if (auth_len > 0) {
391 ret = cryptodev_hash_update(&ses_ptr->hdata,
392 auth_sg, auth_len);
393 if (unlikely(ret)) {
394 dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
395 return ret;
399 if (len > 0) {
400 ret = cryptodev_hash_update(&ses_ptr->hdata,
401 dst_sg, len);
402 if (unlikely(ret)) {
403 dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
404 return ret;
408 ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
409 if (unlikely(ret)) {
410 dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
411 return ret;
414 copy_tls_hash( dst_sg, len, hash_output, caop->tag_len);
415 len += caop->tag_len;
418 if (ses_ptr->cdata.init != 0) {
419 if (ses_ptr->cdata.blocksize > 1) {
420 ret = pad_record(dst_sg, len, ses_ptr->cdata.blocksize);
421 len += ret;
424 ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
425 dst_sg, dst_sg, len);
426 if (unlikely(ret)) {
427 dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
428 return ret;
431 } else {
432 if (ses_ptr->cdata.init != 0) {
433 ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
434 dst_sg, dst_sg, len);
436 if (unlikely(ret)) {
437 dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
438 return ret;
441 if (ses_ptr->cdata.blocksize > 1) {
442 ret = verify_tls_record_pad(dst_sg, len, ses_ptr->cdata.blocksize);
443 if (unlikely(ret < 0)) {
444 dprintk(2, KERN_ERR, "verify_record_pad: %d\n", ret);
445 fail = 1;
446 } else {
447 len -= ret;
452 if (ses_ptr->hdata.init != 0) {
453 if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
454 dprintk(1, KERN_ERR, "Illegal tag len size\n");
455 return -EINVAL;
458 read_tls_hash( dst_sg, len, vhash, caop->tag_len);
459 len -= caop->tag_len;
461 if (auth_len > 0) {
462 ret = cryptodev_hash_update(&ses_ptr->hdata,
463 auth_sg, auth_len);
464 if (unlikely(ret)) {
465 dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
466 return ret;
470 if (len > 0) {
471 ret = cryptodev_hash_update(&ses_ptr->hdata,
472 dst_sg, len);
473 if (unlikely(ret)) {
474 dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
475 return ret;
479 ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
480 if (unlikely(ret)) {
481 dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
482 return ret;
485 if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
486 dprintk(2, KERN_ERR, "MAC verification failed (tag_len: %d)\n", caop->tag_len);
487 return -ECANCELED;
491 kcaop->dst_len = len;
492 return 0;
495 /* Authenticate and encrypt the SRTP way. During decryption
496 * it verifies the tag and returns -ECANCELED on error.
498 static int
499 srtp_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
500 struct scatterlist *auth_sg, uint32_t auth_len,
501 struct scatterlist *dst_sg, uint32_t len)
503 int ret, fail = 0;
504 struct crypt_auth_op *caop = &kcaop->caop;
505 uint8_t vhash[AALG_MAX_RESULT_LEN];
506 uint8_t hash_output[AALG_MAX_RESULT_LEN];
508 /* SRTP authenticates the encrypted data.
510 if (caop->op == COP_ENCRYPT) {
511 if (ses_ptr->cdata.init != 0) {
512 ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
513 dst_sg, dst_sg, len);
514 if (unlikely(ret)) {
515 dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
516 return ret;
520 if (ses_ptr->hdata.init != 0) {
521 if (auth_len > 0) {
522 ret = cryptodev_hash_update(&ses_ptr->hdata,
523 auth_sg, auth_len);
524 if (unlikely(ret)) {
525 dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
526 return ret;
530 ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
531 if (unlikely(ret)) {
532 dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
533 return ret;
536 if (unlikely(copy_to_user(caop->tag, hash_output, caop->tag_len))) {
537 return -EFAULT;
541 } else {
542 if (ses_ptr->hdata.init != 0) {
543 if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
544 dprintk(1, KERN_ERR, "Illegal tag len size\n");
545 return -EINVAL;
548 if (unlikely(copy_from_user(vhash, caop->tag, caop->tag_len))) {
549 return -EFAULT;
552 ret = cryptodev_hash_update(&ses_ptr->hdata,
553 auth_sg, auth_len);
554 if (unlikely(ret)) {
555 dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
556 return ret;
559 ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
560 if (unlikely(ret)) {
561 dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
562 return ret;
565 if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
566 dprintk(2, KERN_ERR, "MAC verification failed\n");
567 return -ECANCELED;
571 if (ses_ptr->cdata.init != 0) {
572 ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
573 dst_sg, dst_sg, len);
575 if (unlikely(ret)) {
576 dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
577 return ret;
582 kcaop->dst_len = len;
583 return 0;
586 /* Typical AEAD (i.e. GCM) encryption/decryption.
587 * During decryption the tag is verified.
589 static int
590 auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
591 struct scatterlist *auth_sg, uint32_t auth_len,
592 struct scatterlist *src_sg,
593 struct scatterlist *dst_sg, uint32_t len)
595 int ret;
596 struct crypt_auth_op *caop = &kcaop->caop;
597 int max_tag_len;
599 max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
600 if (unlikely(caop->tag_len > max_tag_len)) {
601 dprintk(0, KERN_ERR, "Illegal tag length: %d\n", caop->tag_len);
602 return -EINVAL;
605 if (caop->tag_len)
606 cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);
607 else
608 caop->tag_len = max_tag_len;
610 if (caop->op == COP_ENCRYPT) {
611 if (auth_len > 0)
612 cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);
613 else /* for some reason we _have_ to call that */
614 cryptodev_cipher_auth(&ses_ptr->cdata, NULL, 0);
616 ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
617 src_sg, dst_sg, len);
618 if (unlikely(ret)) {
619 dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
620 return ret;
622 kcaop->dst_len = len + caop->tag_len;
623 caop->tag = caop->dst + len;
624 } else {
625 if (auth_len > 0)
626 cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);
628 ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
629 src_sg, dst_sg, len);
631 if (unlikely(ret)) {
632 dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
633 return ret;
635 kcaop->dst_len = len - caop->tag_len;
636 caop->tag = caop->dst + len - caop->tag_len;
639 return 0;
642 /* This is the main crypto function - zero-copy edition */
643 static int
644 __crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
646 struct scatterlist *dst_sg, *auth_sg, *src_sg;
647 struct crypt_auth_op *caop = &kcaop->caop;
648 int ret = 0, pagecount = 0;
650 if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
651 if (unlikely(ses_ptr->cdata.init != 0 &&
652 (ses_ptr->cdata.stream == 0 || ses_ptr->cdata.aead != 0)))
654 dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode (but not AEAD)\n");
655 return -EINVAL;
658 ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg, &pagecount);
659 if (unlikely(ret)) {
660 dprintk(1, KERN_ERR, "get_userbuf_srtp(): Error getting user pages.\n");
661 return ret;
664 ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
665 dst_sg, caop->len);
666 } else { /* TLS and normal cases. Here auth data are usually small
667 * so we just copy them to a free page, instead of trying
668 * to map them.
670 unsigned char* auth_buf = NULL;
671 struct scatterlist tmp;
673 if (unlikely(caop->auth_len > PAGE_SIZE))
674 return -EINVAL;
676 auth_buf = (char *)__get_free_page(GFP_KERNEL);
677 if (unlikely(!auth_buf))
678 return -ENOMEM;
680 if (caop->auth_len > 0) {
681 if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
682 ret = -EFAULT;
683 goto fail;
686 sg_init_one(&tmp, auth_buf, caop->auth_len);
687 auth_sg = &tmp;
688 } else {
689 auth_sg = NULL;
692 if (caop->flags & COP_FLAG_AEAD_TLS_TYPE && ses_ptr->cdata.aead == 0) {
693 ret = get_userbuf_tls(ses_ptr, kcaop, &dst_sg, &pagecount);
694 if (unlikely(ret)) {
695 dprintk(1, KERN_ERR, "get_userbuf_tls(): Error getting user pages.\n");
696 goto fail;
699 ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
700 dst_sg, caop->len);
701 } else {
702 int dst_len;
704 if (unlikely(ses_ptr->cdata.init == 0 ||
705 ses_ptr->cdata.stream == 0 ||
706 ses_ptr->cdata.aead == 0))
708 dprintk(0, KERN_ERR, "Only stream and AEAD ciphers are allowed for authenc\n");
709 return -EINVAL;
712 if (caop->op == COP_ENCRYPT) dst_len = caop->len + cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
713 else dst_len = caop->len - cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
715 ret = get_userbuf(ses_ptr, caop->src, caop->len, caop->dst, dst_len,
716 kcaop->task, kcaop->mm, &src_sg, &dst_sg, &pagecount);
717 if (unlikely(ret)) {
718 dprintk(1, KERN_ERR, "get_userbuf(): Error getting user pages.\n");
719 goto fail;
722 ret = auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
723 src_sg, dst_sg, caop->len);
726 fail:
727 free_page((unsigned long)auth_buf);
730 release_user_pages(ses_ptr->pages, pagecount);
731 return ret;
735 int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop)
737 struct csession *ses_ptr;
738 struct crypt_auth_op *caop = &kcaop->caop;
739 int ret;
741 if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
742 dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", caop->op);
743 return -EINVAL;
746 /* this also enters ses_ptr->sem */
747 ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
748 if (unlikely(!ses_ptr)) {
749 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
750 return -EINVAL;
753 if (unlikely(ses_ptr->cdata.init == 0)) {
754 dprintk(1, KERN_ERR, "cipher context not initialized\n");
755 ret = -EINVAL;
756 goto out_unlock;
759 /* If we have a hash/mac handle reset its state */
760 if (ses_ptr->hdata.init != 0) {
761 ret = cryptodev_hash_reset(&ses_ptr->hdata);
762 if (unlikely(ret)) {
763 dprintk(1, KERN_ERR,
764 "error in cryptodev_hash_reset()\n");
765 goto out_unlock;
769 cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
770 min(ses_ptr->cdata.ivsize, kcaop->ivlen));
772 if (likely(caop->len || caop->auth_len)) {
773 ret = __crypto_auth_run_zc(ses_ptr, kcaop);
774 if (unlikely(ret))
775 goto out_unlock;
776 } else {
777 ret = -EINVAL;
778 goto out_unlock;
781 ret = 0;
783 cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
784 min(ses_ptr->cdata.ivsize, kcaop->ivlen));
786 out_unlock:
787 crypto_put_session(ses_ptr);
788 return ret;