/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/*
 * This file handles the AEAD part of /dev/crypto.
 */
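/*
 * For orientation, a rough sketch of how userspace is expected to reach
 * this code. It assumes the CIOCAUTHCRYPT ioctl and the crypt_auth_op
 * layout from crypto/cryptodev.h; 'cfd' and 'sid' (an open /dev/crypto
 * fd and a session from CIOCGSESSION) are hypothetical names, and the
 * snippet is an illustration, not a reference client:
 *
 *	struct crypt_auth_op cao = {
 *		.ses      = sid,
 *		.op       = COP_ENCRYPT,
 *		.flags    = COP_FLAG_AEAD_TLS_TYPE,
 *		.iv       = iv,
 *		.auth_src = record_hdr,      // data that is only MACed
 *		.auth_len = record_hdr_len,
 *		.src      = buf,             // in-place: src == dst; the
 *		.dst      = buf,             // buffer must leave room for
 *		.len      = plain_len,       // pad + tag (see dst_len in
 *	};                                   // fill_kcaop_from_caop())
 *
 *	if (ioctl(cfd, CIOCAUTHCRYPT, &cao) == 0)
 *		record_len = cao.len;        // ciphertext incl. pad + tag
 */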
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <crypto/cryptodev.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include "cryptodev_int.h"
#include "version.h"
/* make caop->auth_src and caop->dst available in scatterlists */
static int get_userbuf_aead(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
			struct scatterlist **auth_sg, struct scatterlist **dst_sg,
			int *tot_pages)
{
	int dst_pagecount = 0, pagecount;
	int auth_pagecount = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	int rc;

	if (caop->dst == NULL && caop->auth_src == NULL)
		return -EINVAL;

	if (ses->alignmask) {
		if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
			dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
				__func__, (unsigned long)caop->dst, ses->alignmask + 1);
		if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask))
			dprintk(2, KERN_WARNING, "%s: careful - auth source address %lx is not %d byte aligned\n",
				__func__, (unsigned long)caop->auth_src, ses->alignmask + 1);
	}

	if (kcaop->dst_len == 0) {
		dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
		return -EINVAL;
	}

	if (caop->auth_len > 0)
		auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);

	dst_pagecount = PAGECOUNT(caop->dst, kcaop->dst_len);

	(*tot_pages) = pagecount = auth_pagecount + dst_pagecount;

	rc = adjust_sg_array(ses, pagecount);
	if (rc)
		return rc;

	if (auth_pagecount > 0) {
		rc = __get_userbuf(caop->auth_src, caop->auth_len, 0, auth_pagecount,
				   ses->pages, ses->sg, kcaop->task, kcaop->mm);
		if (unlikely(rc)) {
			dprintk(1, KERN_ERR,
				"failed to get user pages for data input\n");
			return -EINVAL;
		}
		(*auth_sg) = ses->sg;
		(*dst_sg) = ses->sg + auth_pagecount;
	} else {
		(*auth_sg) = NULL;
		(*dst_sg) = ses->sg;
	}

	rc = __get_userbuf(caop->dst, kcaop->dst_len, 1, dst_pagecount,
			   ses->pages + auth_pagecount, *dst_sg, kcaop->task, kcaop->mm);
	if (unlikely(rc)) {
		release_user_pages(ses->pages, auth_pagecount);
		dprintk(1, KERN_ERR,
			"failed to get user pages for data output\n");
		return -EINVAL;
	}

	return 0;
}
/* Taken from Maxim Levitsky's patch */
static struct scatterlist *sg_advance(struct scatterlist *sg, int consumed)
{
	while (consumed >= sg->length) {
		consumed -= sg->length;

		sg = sg_next(sg);
		if (!sg)
			break;
	}

	WARN_ON(!sg && consumed);

	if (!sg)
		return NULL;

	sg->offset += consumed;
	sg->length -= consumed;

	if (sg->offset >= PAGE_SIZE) {
		struct page *page =
			nth_page(sg_page(sg), sg->offset / PAGE_SIZE);
		sg_set_page(sg, page, sg->length, sg->offset % PAGE_SIZE);
	}

	return sg;
}
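/*
 * Example of the intended semantics, with hypothetical sizes: given a
 * chain of two 4096-byte entries at offset 0, sg_advance(sg, 4100)
 * consumes the whole first entry plus 4 bytes of the second, and returns
 * the second entry rewritten as offset += 4, length -= 4 (re-deriving the
 * backing page when the new offset crosses a PAGE_SIZE boundary).
 * Advancing past the end of the chain returns NULL.
 */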
/*
 * sg_copy - copies sg entries from sg_from to sg_to, such
 * that sg_to covers the first 'len' bytes of sg_from.
 */
static int sg_copy(struct scatterlist *sg_from, struct scatterlist *sg_to, int len)
{
	while (len > sg_from->length) {
		len -= sg_from->length;

		sg_set_page(sg_to, sg_page(sg_from),
			    sg_from->length, sg_from->offset);

		sg_to = sg_next(sg_to);
		sg_from = sg_next(sg_from);

		if (len && (!sg_from || !sg_to))
			return -ENOMEM;
	}

	if (len)
		sg_set_page(sg_to, sg_page(sg_from),
			    len, sg_from->offset);
	sg_mark_end(sg_to);

	return 0;
}
static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
			struct scatterlist **auth_sg, struct scatterlist **dst_sg,
			int *tot_pages)
{
	int pagecount, diff;
	int auth_pagecount = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	int rc;

	if (caop->dst == NULL && caop->auth_src == NULL)
		return -EINVAL;

	if (ses->alignmask) {
		if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
			dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
				__func__, (unsigned long)caop->dst, ses->alignmask + 1);
		if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask))
			dprintk(2, KERN_WARNING, "%s: careful - auth source address %lx is not %d byte aligned\n",
				__func__, (unsigned long)caop->auth_src, ses->alignmask + 1);
	}

	if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
		dprintk(1, KERN_WARNING, "Destination and auth lengths cannot be zero\n");
		return -EINVAL;
	}

	/* Note that in SRTP the auth data overlap with the data to be
	 * encrypted (dst).
	 */
	auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
	diff = (int)(caop->src - caop->auth_src);
	if (diff > PAGE_SIZE || diff < 0) {
		dprintk(1, KERN_WARNING, "auth_src must overlap with src (diff: %d).\n", diff);
		return -EINVAL;
	}

	(*tot_pages) = pagecount = auth_pagecount;

	rc = adjust_sg_array(ses, pagecount*2); /* double pages to have pages for dst(=auth_src) */
	if (rc)
		return rc;

	rc = __get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
			   ses->pages, ses->sg, kcaop->task, kcaop->mm);
	if (unlikely(rc)) {
		dprintk(1, KERN_ERR,
			"failed to get user pages for data input\n");
		return -EINVAL;
	}

	(*auth_sg) = ses->sg;

	(*dst_sg) = ses->sg + auth_pagecount;
	sg_init_table(*dst_sg, auth_pagecount);
	sg_copy(ses->sg, (*dst_sg), caop->auth_len);
	(*dst_sg) = sg_advance(*dst_sg, diff);
	if (*dst_sg == NULL) {
		release_user_pages(ses->pages, pagecount);
		dprintk(1, KERN_ERR,
			"failed to get enough pages for auth data\n");
		return -EINVAL;
	}

	return 0;
}
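/*
 * Layout sketch for the SRTP case (values are illustrative): the auth
 * region covers the whole packet while the cipher operates on a window
 * inside it, so dst is derived from the auth mapping instead of being
 * mapped separately:
 *
 *	auth_src                                auth_src + auth_len
 *	|<------------------- auth_len ------------------->|
 *	         |<---------------- len ---------------->|
 *	         src == dst, where src - auth_src == diff <= PAGE_SIZE
 *
 * The second half of ses->sg is a shallow copy of the first (sg_copy())
 * advanced by diff bytes (sg_advance()); both scatterlists therefore
 * reference the same pinned user pages, which is why a single
 * release_user_pages() call in __crypto_auth_run_zc() covers them.
 */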
int copy_from_user_to_user(void __user *dst, void __user *src, int len)
{
	uint8_t *buffer;
	int buffer_size = min(len, 16*1024);
	int rc;

	/* only lengths that fit in a single bounce buffer are accepted */
	if (len > buffer_size) {
		dprintk(1, KERN_ERR,
			"The provided buffer is too large\n");
		return -EINVAL;
	}

	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	if (unlikely(copy_from_user(buffer, src, len))) {
		rc = -EFAULT;
		goto out;
	}

	if (unlikely(copy_to_user(dst, buffer, len))) {
		rc = -EFAULT;
		goto out;
	}

	rc = 0;
out:
	kfree(buffer);
	return rc;
}
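/*
 * The min() above caps the bounce buffer at 16KB and the length check
 * then rejects anything larger, so a single kmalloc/copy pair always
 * suffices. If larger copies were ever needed, a chunked loop would be
 * the natural extension; a minimal sketch (not part of the driver):
 *
 *	while (len > 0) {
 *		int chunk = min(len, buffer_size);
 *
 *		if (copy_from_user(buffer, src, chunk) ||
 *		    copy_to_user(dst, buffer, chunk)) {
 *			rc = -EFAULT;
 *			goto out;
 *		}
 *		src += chunk;
 *		dst += chunk;
 *		len -= chunk;
 *	}
 */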
static int fill_kcaop_from_caop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
	struct crypt_auth_op *caop = &kcaop->caop;
	struct csession *ses_ptr;
	int rc;

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
	if (unlikely(!ses_ptr)) {
		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
		return -EINVAL;
	}

	if (caop->src != caop->dst) {
		dprintk(2, KERN_ERR,
			"Non-inplace encryption and decryption is not efficient\n");

		rc = copy_from_user_to_user(caop->dst, caop->src, caop->len);
		if (rc < 0)
			goto out_unlock;
	}

	if (caop->tag_len == 0)
		caop->tag_len = ses_ptr->hdata.digestsize;

	kcaop->ivlen = caop->iv ? ses_ptr->cdata.ivsize : 0;

	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE)
		kcaop->dst_len = caop->len + ses_ptr->cdata.blocksize /* pad */ + caop->tag_len;
	else
		kcaop->dst_len = caop->len;

	kcaop->task = current;
	kcaop->mm = current->mm;

	if (caop->iv) {
		rc = copy_from_user(kcaop->iv, caop->iv, kcaop->ivlen);
		if (unlikely(rc)) {
			dprintk(1, KERN_ERR,
				"error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
				kcaop->ivlen, rc, (unsigned long)caop->iv);
			rc = -EFAULT;
			goto out_unlock;
		}
	}

	rc = 0;

out_unlock:
	crypto_put_session(ses_ptr);
	return rc;
}
static int fill_caop_from_kcaop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
	int ret;

	kcaop->caop.len = kcaop->dst_len;

	if (kcaop->ivlen && kcaop->caop.flags & COP_FLAG_WRITE_IV) {
		ret = copy_to_user(kcaop->caop.iv,
				   kcaop->iv, kcaop->ivlen);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR, "Error in copying to userspace\n");
			return -EFAULT;
		}
	}

	return 0;
}
int kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
		struct fcrypt *fcr, void __user *arg)
{
	if (unlikely(copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop)))) {
		dprintk(1, KERN_ERR, "Error in copying from userspace\n");
		return -EFAULT;
	}

	return fill_kcaop_from_caop(kcaop, fcr);
}

int kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
		struct fcrypt *fcr, void __user *arg)
{
	int ret;

	ret = fill_caop_from_kcaop(kcaop, fcr);
	if (unlikely(ret)) {
		dprintk(1, KERN_ERR, "error in fill_caop_from_kcaop()\n");
		return ret;
	}

	if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
		dprintk(1, KERN_ERR, "Error in copying to userspace\n");
		return -EFAULT;
	}

	return 0;
}
static void copy_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}

static void read_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len-hash_len, hash_len, 0);
}

static int pad_record(struct scatterlist *dst_sg, int len, int block_size)
{
	uint8_t pad[block_size];
	int pad_size = block_size - (len % block_size);

	memset(pad, pad_size-1, pad_size);

	scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);

	return pad_size;
}
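/*
 * Worked example of the TLS (RFC 5246, section 6.2.3.2) CBC padding
 * produced above: for len = 29 and block_size = 16, pad_size is
 * 16 - (29 % 16) = 3, so the bytes 02 02 02 are appended; every pad
 * octet, including the trailing length octet, carries pad_size - 1.
 */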
static int verify_tls_record_pad(struct scatterlist *dst_sg, int len, int block_size)
{
	uint8_t pad[256]; /* the maximum allowed */
	uint8_t pad_size;
	int i;

	scatterwalk_map_and_copy(&pad_size, dst_sg, len-1, 1, 0);

	if (pad_size+1 > len) {
		dprintk(1, KERN_ERR, "Pad size: %d\n", pad_size);
		return -ECANCELED;
	}

	scatterwalk_map_and_copy(pad, dst_sg, len-pad_size-1, pad_size+1, 0);

	for (i = 0; i < pad_size; i++) {
		if (pad[i] != pad_size) {
			dprintk(1, KERN_ERR, "Pad size: %d, pad: %d\n", pad_size, (int)pad[i]);
			return -ECANCELED;
		}
	}

	return pad_size+1;
}
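/*
 * On success verify_tls_record_pad() returns pad_size + 1, i.e. the
 * total number of trailing bytes (pad octets plus the length octet)
 * to strip; tls_auth_n_crypt() below subtracts that from 'len' before
 * locating and checking the MAC.
 */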
static int
tls_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
		struct scatterlist *auth_sg, uint32_t auth_len,
		struct scatterlist *dst_sg, uint32_t len)
{
	int ret, fail = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	uint8_t vhash[AALG_MAX_RESULT_LEN];
	uint8_t hash_output[AALG_MAX_RESULT_LEN];

	/* TLS authenticates the plaintext except for the padding.
	 */
	if (caop->op == COP_ENCRYPT) {
		if (ses_ptr->hdata.init != 0) {
			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    auth_sg, auth_len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					goto out_err;
				}
			}

			if (len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    dst_sg, len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					goto out_err;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				goto out_err;
			}

			copy_tls_hash(dst_sg, len, hash_output, caop->tag_len);
			len += caop->tag_len;
		}

		if (ses_ptr->cdata.init != 0) {
			if (ses_ptr->cdata.blocksize > 1) {
				ret = pad_record(dst_sg, len, ses_ptr->cdata.blocksize);
				len += ret;
			}

			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
						       dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
				goto out_err;
			}
		}
	} else {
		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
						       dst_sg, dst_sg, len);

			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
				goto out_err;
			}

			if (ses_ptr->cdata.blocksize > 1) {
				ret = verify_tls_record_pad(dst_sg, len, ses_ptr->cdata.blocksize);
				if (unlikely(ret < 0)) {
					dprintk(0, KERN_ERR, "verify_record_pad: %d\n", ret);
					fail = 1;
				} else {
					len -= ret;
				}
			}
		}

		if (ses_ptr->hdata.init != 0) {
			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
				dprintk(1, KERN_ERR, "Illegal tag len size\n");
				ret = -EINVAL;
				goto out_err;
			}

			read_tls_hash(dst_sg, len, vhash, caop->tag_len);
			len -= caop->tag_len;

			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    auth_sg, auth_len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					goto out_err;
				}
			}

			if (len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    dst_sg, len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					goto out_err;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				goto out_err;
			}

			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
				dprintk(1, KERN_ERR, "MAC verification failed (tag_len: %d)\n", caop->tag_len);
				ret = -ECANCELED;
				goto out_err;
			}
		}
	}

	kcaop->dst_len = len;
	return 0;
out_err:
	return ret;
}
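/*
 * Recap of the TLS record flow implemented above:
 *   COP_ENCRYPT: MAC(auth_src || plaintext), append tag, pad, encrypt.
 *   COP_DECRYPT: decrypt, verify/strip pad, split off tag, MAC the
 *                remainder, compare.
 * A bad pad only sets 'fail' and the MAC is still checked, so pad and
 * MAC errors both surface uniformly as -ECANCELED.
 */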
static int
srtp_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
		struct scatterlist *auth_sg, uint32_t auth_len,
		struct scatterlist *dst_sg, uint32_t len)
{
	int ret, fail = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	uint8_t vhash[AALG_MAX_RESULT_LEN];
	uint8_t hash_output[AALG_MAX_RESULT_LEN];

	/* SRTP authenticates the encrypted data.
	 */
	if (caop->op == COP_ENCRYPT) {
		if (ses_ptr->cdata.init != 0) {
			if (ses_ptr->cdata.stream == 0) {
				dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode\n");
				ret = -EINVAL;
				goto out_err;
			}

			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
						       dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
				goto out_err;
			}
		}

		if (ses_ptr->hdata.init != 0) {
			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    auth_sg, auth_len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					goto out_err;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				goto out_err;
			}

			if (unlikely(copy_to_user(caop->tag, hash_output, caop->tag_len))) {
				ret = -EFAULT;
				goto out_err;
			}
		}
	} else {
		if (ses_ptr->hdata.init != 0) {
			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
				dprintk(1, KERN_ERR, "Illegal tag len size\n");
				ret = -EINVAL;
				goto out_err;
			}

			if (unlikely(copy_from_user(vhash, caop->tag, caop->tag_len))) {
				ret = -EFAULT;
				goto out_err;
			}

			ret = cryptodev_hash_update(&ses_ptr->hdata,
						    auth_sg, auth_len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
				goto out_err;
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				goto out_err;
			}

			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
				dprintk(1, KERN_ERR, "MAC verification failed\n");
				ret = -ECANCELED;
				goto out_err;
			}
		}

		if (ses_ptr->cdata.init != 0) {
			if (ses_ptr->cdata.stream == 0) {
				dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode\n");
				ret = -EINVAL;
				goto out_err;
			}

			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
						       dst_sg, dst_sg, len);

			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
				goto out_err;
			}
		}
	}

	kcaop->dst_len = len;
	return 0;
out_err:
	return ret;
}
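/*
 * Recap of the SRTP flow implemented above: encrypt-then-MAC over the
 * ciphertext, with the tag exchanged through the separate caop->tag
 * user pointer rather than appended to the record, and only stream
 * ciphers (cdata.stream != 0) accepted, since packet payloads are not
 * padded to a block size.
 */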
/* This is the main crypto function - zero-copy edition */
static int
__crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
	struct scatterlist *dst_sg, *auth_sg;
	struct crypt_auth_op *caop = &kcaop->caop;
	int ret = 0, pagecount;

	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE) {
		ret = get_userbuf_aead(ses_ptr, kcaop, &auth_sg, &dst_sg, &pagecount);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR, "Error getting user pages.\n");
			return ret;
		}

		ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
				       dst_sg, caop->len);
	} else if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
		ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg, &pagecount);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR, "Error getting user pages.\n");
			return ret;
		}

		ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
					dst_sg, caop->len);
	} else {
		dprintk(1, KERN_ERR, "Unsupported flag for authenc\n");
		return -EINVAL;
	}

	release_user_pages(ses_ptr->pages, pagecount);
	return ret;
}
int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop)
{
	struct csession *ses_ptr;
	struct crypt_auth_op *caop = &kcaop->caop;
	int ret;

	if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
		dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", caop->op);
		return -EINVAL;
	}

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
	if (unlikely(!ses_ptr)) {
		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
		return -EINVAL;
	}

	if (unlikely(ses_ptr->cdata.init == 0)) {
		dprintk(1, KERN_ERR, "cipher context not initialized\n");
		/* the session semaphore is held; do not return directly */
		ret = -EINVAL;
		goto out_unlock;
	}

	if (ses_ptr->hdata.init != 0) {
		ret = cryptodev_hash_reset(&ses_ptr->hdata);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR,
				"error in cryptodev_hash_reset()\n");
			goto out_unlock;
		}
	}

	cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
				min(ses_ptr->cdata.ivsize, kcaop->ivlen));

	if (likely(caop->len || caop->auth_len)) {
		ret = __crypto_auth_run_zc(ses_ptr, kcaop);
		if (unlikely(ret))
			goto out_unlock;
	}

	/* make sure a zero-length request does not return garbage */
	ret = 0;

	cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
				min(ses_ptr->cdata.ivsize, kcaop->ivlen));

out_unlock:
	mutex_unlock(&ses_ptr->sem);
	return ret;
}
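/*
 * SRTP counterpart of the usage sketch at the top of this file; again
 * this assumes the CIOCAUTHCRYPT ioctl, and 'packet', 'hdr_len' and the
 * 10-byte (80-bit) tag length are illustrative values only:
 *
 *	struct crypt_auth_op cao = {
 *		.ses      = sid,
 *		.op       = COP_ENCRYPT,
 *		.flags    = COP_FLAG_AEAD_SRTP_TYPE,
 *		.iv       = iv,
 *		.auth_src = packet,                  // header + payload,
 *		.auth_len = packet_len,              //   all authenticated
 *		.src      = packet + hdr_len,        // only the payload is
 *		.dst      = packet + hdr_len,        //   encrypted; must lie
 *		.len      = packet_len - hdr_len,    //   within PAGE_SIZE of
 *		.tag      = tag,                     //   auth_src (see
 *		.tag_len  = 10,                      //   get_userbuf_srtp())
 *	};
 *
 * On COP_ENCRYPT the computed tag is written to 'tag'; on COP_DECRYPT it
 * is read from there and verified.
 */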