corrected bug when having additional data in TLS.
[cryptodev-linux.git] / cryptodev_auth.c
blob6e7088a2e0945bcffe9d7cd024ee86e2f17b0392
1 /*
2 * Driver for /dev/crypto device (aka CryptoDev)
4 * Copyright (c) 2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
6 * This file is part of linux cryptodev.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
25 * This file handles the AEAD part of /dev/crypto.
29 #include <crypto/hash.h>
30 #include <linux/crypto.h>
31 #include <linux/mm.h>
32 #include <linux/highmem.h>
33 #include <linux/ioctl.h>
34 #include <linux/random.h>
35 #include <linux/syscalls.h>
36 #include <linux/pagemap.h>
37 #include <linux/poll.h>
38 #include <linux/uaccess.h>
39 #include <crypto/cryptodev.h>
40 #include <crypto/scatterwalk.h>
41 #include <linux/scatterlist.h>
42 #include "cryptodev_int.h"
43 #include "version.h"
46 /* make cop->src and cop->dst available in scatterlists */
47 static int get_userbuf_aead(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
48 struct scatterlist **auth_sg, struct scatterlist **dst_sg,
49 int *tot_pages)
51 int dst_pagecount = 0, pagecount;
52 int auth_pagecount = 0;
53 struct crypt_auth_op *caop = &kcaop->caop;
54 int rc;
56 if (caop->dst == NULL && caop->auth_src == NULL)
57 return -EINVAL;
59 if (ses->alignmask) {
60 if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
61 dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
62 __func__, (unsigned long)caop->dst, ses->alignmask + 1);
63 if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask))
64 dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
65 __func__, (unsigned long)caop->auth_src, ses->alignmask + 1);
68 if (kcaop->dst_len == 0) {
69 dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
70 return -EINVAL;
73 if (caop->auth_len > 0)
74 auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
76 dst_pagecount = PAGECOUNT(caop->dst, kcaop->dst_len);
78 (*tot_pages) = pagecount = auth_pagecount + dst_pagecount;
80 rc = adjust_sg_array(ses, pagecount);
81 if (rc)
82 return rc;
84 if (auth_pagecount > 0) {
85 rc = __get_userbuf(caop->auth_src, caop->auth_len, 0, auth_pagecount,
86 ses->pages, ses->sg, kcaop->task, kcaop->mm);
87 if (unlikely(rc)) {
88 dprintk(1, KERN_ERR,
89 "failed to get user pages for data input\n");
90 return -EINVAL;
92 (*auth_sg) = ses->sg;
93 (*dst_sg) = ses->sg + auth_pagecount;
94 } else {
95 (*auth_sg) = NULL;
96 (*dst_sg) = ses->sg;
99 rc = __get_userbuf(caop->dst, kcaop->dst_len, 1, dst_pagecount,
100 ses->pages + auth_pagecount, *dst_sg, kcaop->task, kcaop->mm);
101 if (unlikely(rc)) {
102 release_user_pages(ses->pages, auth_pagecount);
103 dprintk(1, KERN_ERR,
104 "failed to get user pages for data input\n");
105 return -EINVAL;
108 return 0;
111 static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
112 struct scatterlist **auth_sg, struct scatterlist **dst_sg,
113 int *tot_pages)
115 int pagecount, diff;
116 int auth_pagecount = 0;
117 struct crypt_auth_op *caop = &kcaop->caop;
118 int rc;
120 if (caop->dst == NULL && caop->auth_src == NULL)
121 return -EINVAL;
123 if (ses->alignmask) {
124 if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
125 dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
126 __func__, (unsigned long)caop->dst, ses->alignmask + 1);
127 if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask))
128 dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
129 __func__, (unsigned long)caop->auth_src, ses->alignmask + 1);
132 if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
133 dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
134 return -EINVAL;
137 /* Note that in SRTP auth data overlap with data to be encrypted (dst)
140 auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
141 diff = (int)(caop->src - caop->auth_src);
142 if (diff > 256 || diff < 0) {
143 dprintk(1, KERN_WARNING, "auth_src must overlap with src (diff: %d).\n", diff);
144 return -EINVAL;
147 (*tot_pages) = pagecount = auth_pagecount;
149 rc = adjust_sg_array(ses, pagecount);
150 if (rc)
151 return rc;
153 rc = __get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
154 ses->pages, ses->sg, kcaop->task, kcaop->mm);
155 if (unlikely(rc)) {
156 dprintk(1, KERN_ERR,
157 "failed to get user pages for data input\n");
158 return -EINVAL;
160 (*auth_sg) = ses->sg;
162 memcpy(&ses->sg2, ses->sg, sizeof(ses->sg[0]));
163 ses->sg2.offset += diff;
164 (*dst_sg) = &ses->sg2;
166 return 0;
/* Populate the kernel-side kernel_crypt_auth_op from the user-supplied
 * crypt_auth_op: resolve the session, force the operation in-place by
 * copying src to dst if they differ, default tag_len to the digest size,
 * compute the worst-case dst_len (TLS adds pad + tag), and copy in the IV.
 * Returns 0 or a negative errno; the session is released before returning.
 */
static int fill_kcaop_from_caop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
	struct crypt_auth_op *caop = &kcaop->caop;
	struct csession *ses_ptr;
	int rc;

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
	if (unlikely(!ses_ptr)) {
		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
		return -EINVAL;
	}

	if (caop->src != caop->dst) {
		dprintk(2, KERN_ERR,
			"Non-inplace encryption and decryption is not efficient\n");
		/* Bounce src through the per-session buffer into dst so the
		 * rest of the code can operate on dst in place. */
		if (caop->len > sizeof(ses_ptr->buffer)) {
			dprintk(1, KERN_ERR,
				"The provided buffer is too large\n");
			rc = -EINVAL;
			goto out_unlock;
		}

		if (unlikely(copy_from_user(ses_ptr->buffer, caop->src, caop->len))) {
			rc = -EFAULT;
			goto out_unlock;
		}

		if (unlikely(copy_to_user(caop->dst, ses_ptr->buffer, caop->len))) {
			rc = -EFAULT;
			goto out_unlock;
		}
	}

	/* Default to the full digest when the caller did not pick a tag size. */
	if (caop->tag_len == 0)
		caop->tag_len = ses_ptr->hdata.digestsize;

	kcaop->ivlen = caop->iv ? ses_ptr->cdata.ivsize : 0;

	/* TLS output may grow by up to one block of padding plus the tag. */
	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE)
		kcaop->dst_len = caop->len + ses_ptr->cdata.blocksize /* pad */ + caop->tag_len;
	else
		kcaop->dst_len = caop->len;

	kcaop->task = current;
	kcaop->mm = current->mm;

	if (caop->iv) {
		rc = copy_from_user(kcaop->iv, caop->iv, kcaop->ivlen);
		if (unlikely(rc)) {
			dprintk(1, KERN_ERR,
				"error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
				kcaop->ivlen, rc, (unsigned long)caop->iv);
			rc = -EFAULT;
			goto out_unlock;
		}
	}

	rc = 0;

out_unlock:
	crypto_put_session(ses_ptr);
	return rc;
}
237 static int fill_caop_from_kcaop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
239 int ret;
241 kcaop->caop.len = kcaop->dst_len;
243 if (kcaop->ivlen && kcaop->caop.flags & COP_FLAG_WRITE_IV) {
244 ret = copy_to_user(kcaop->caop.iv,
245 kcaop->iv, kcaop->ivlen);
246 if (unlikely(ret)) {
247 dprintk(1, KERN_ERR, "Error in copying to userspace\n");
248 return -EFAULT;
251 return 0;
255 int kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
256 struct fcrypt *fcr, void __user *arg)
258 if (unlikely(copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop)))) {
259 dprintk(1, KERN_ERR, "Error in copying from userspace\n");
260 return -EFAULT;
263 return fill_kcaop_from_caop(kcaop, fcr);
266 int kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
267 struct fcrypt *fcr, void __user *arg)
269 int ret;
271 ret = fill_caop_from_kcaop(kcaop, fcr);
272 if (unlikely(ret)) {
273 dprintk(1, KERN_ERR, "fill_caop_from_kcaop\n");
274 return ret;
277 if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
278 dprintk(1, KERN_ERR, "Error in copying to userspace\n");
279 return -EFAULT;
281 return 0;
/* Append hash_len bytes of hash into dst_sg at offset len (direction 1 = write). */
static void copy_hash( struct scatterlist *dst_sg, int len, void* hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}
/* Read the trailing hash_len bytes ending at offset len out of dst_sg
 * into hash (direction 0 = read). */
static void read_hash( struct scatterlist *dst_sg, int len, void* hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len-hash_len, hash_len, 0);
}
/* Append TLS CBC-style padding after len bytes in dst_sg: pad_size bytes,
 * each holding the value pad_size-1 (the last one doubling as the TLS
 * padding-length byte).  Returns the number of bytes appended.
 */
static int pad_record( struct scatterlist *dst_sg, int len, int block_size)
{
	int pad_size = block_size - (len % block_size);
	uint8_t pad[block_size];
	int i;

	for (i = 0; i < pad_size; i++)
		pad[i] = (uint8_t)(pad_size - 1);

	scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);

	return pad_size;
}
306 static int verify_record_pad( struct scatterlist *dst_sg, int len, int block_size)
308 uint8_t pad[256]; /* the maximum allowed */
309 uint8_t pad_size;
310 int i;
312 scatterwalk_map_and_copy(&pad_size, dst_sg, len-1, 1, 0);
313 pad_size++;
315 if (pad_size > len)
316 return -ECANCELED;
318 scatterwalk_map_and_copy(pad, dst_sg, len-pad_size, pad_size, 0);
320 for (i=0;i<pad_size;i++)
321 if (pad[i] != pad_size)
322 return -ECANCELED;
324 return 0;
327 static int
328 tls_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
329 struct scatterlist *auth_sg, uint32_t auth_len,
330 struct scatterlist *dst_sg, uint32_t len)
332 int ret, fail = 0;
333 struct crypt_auth_op *caop = &kcaop->caop;
334 uint8_t vhash[AALG_MAX_RESULT_LEN];
335 uint8_t hash_output[AALG_MAX_RESULT_LEN];
337 /* TLS authenticates the plaintext except for the padding.
339 if (caop->op == COP_ENCRYPT) {
340 if (ses_ptr->hdata.init != 0) {
341 if (auth_len > 0) {
342 ret = cryptodev_hash_update(&ses_ptr->hdata,
343 auth_sg, auth_len);
344 if (unlikely(ret)) {
345 dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
346 goto out_err;
350 if (len > 0) {
351 ret = cryptodev_hash_update(&ses_ptr->hdata,
352 dst_sg, len);
353 if (unlikely(ret)) {
354 dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
355 goto out_err;
359 ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
360 if (unlikely(ret)) {
361 dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
362 goto out_err;
365 copy_hash( dst_sg, len, hash_output, caop->tag_len);
366 len += caop->tag_len;
369 if (ses_ptr->cdata.init != 0) {
370 if (ses_ptr->cdata.blocksize > 1) {
371 ret = pad_record(dst_sg, len, ses_ptr->cdata.blocksize);
372 len += ret;
375 ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
376 dst_sg, dst_sg, len);
377 if (unlikely(ret)) {
378 dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
379 goto out_err;
382 } else {
383 if (ses_ptr->cdata.init != 0) {
384 ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
385 dst_sg, dst_sg, len);
387 if (unlikely(ret)) {
388 dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
389 goto out_err;
392 if (ses_ptr->cdata.blocksize > 1) {
393 ret = verify_record_pad(dst_sg, len, ses_ptr->cdata.blocksize);
394 if (unlikely(ret)) {
395 dprintk(0, KERN_ERR, "verify_record_pad: %d\n", ret);
396 fail = 1;
397 } else
398 len -= ret;
402 if (ses_ptr->hdata.init != 0) {
403 if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
404 dprintk(1, KERN_ERR, "Illegal tag len size\n");
405 ret = -EINVAL;
406 goto out_err;
409 read_hash( dst_sg, len, vhash, caop->tag_len);
410 len -= caop->tag_len;
412 if (auth_len > 0) {
413 ret = cryptodev_hash_update(&ses_ptr->hdata,
414 auth_sg, auth_len);
415 if (unlikely(ret)) {
416 dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
417 goto out_err;
421 if (len > 0) {
422 ret = cryptodev_hash_update(&ses_ptr->hdata,
423 dst_sg, len);
424 if (unlikely(ret)) {
425 dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
426 goto out_err;
430 ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
431 if (unlikely(ret)) {
432 dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
433 goto out_err;
436 if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
437 dprintk(1, KERN_ERR, "MAC verification failed\n");
438 ret = -ECANCELED;
439 goto out_err;
443 kcaop->dst_len = len;
444 return 0;
445 out_err:
446 return ret;
/* Perform an SRTP-style encrypt-then-MAC (or verify-then-decrypt) pass.
 *
 * SRTP authenticates the encrypted data, so on encrypt the cipher runs
 * first and the tag is copied to the user pointer caop->tag; on decrypt the
 * tag is read from caop->tag and verified before decrypting.  Only stream
 * ciphers are accepted (dst_sg aliases auth_sg at an offset, so block
 * padding would corrupt the overlap).  kcaop->dst_len receives the data
 * length.  Returns 0 or a negative errno (-ECANCELED on MAC failure).
 */
static int
srtp_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
		struct scatterlist *auth_sg, uint32_t auth_len,
		struct scatterlist *dst_sg, uint32_t len)
{
	int ret, fail = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	uint8_t vhash[AALG_MAX_RESULT_LEN];
	uint8_t hash_output[AALG_MAX_RESULT_LEN];

	/* SRTP authenticates the encrypted data. */
	if (caop->op == COP_ENCRYPT) {
		if (ses_ptr->cdata.init != 0) {
			if (ses_ptr->cdata.stream == 0) {
				dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode\n");
				ret = -EINVAL;
				goto out_err;
			}

			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
							dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
				goto out_err;
			}
		}

		if (ses_ptr->hdata.init != 0) {
			/* auth_sg covers the (now encrypted) region including dst. */
			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							auth_sg, auth_len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					goto out_err;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				goto out_err;
			}

			/* The tag lives out-of-band in userspace, not in dst_sg. */
			if (unlikely(copy_to_user(caop->tag, hash_output, caop->tag_len))) {
				ret = -EFAULT;
				goto out_err;
			}
		}
	} else {
		if (ses_ptr->hdata.init != 0) {
			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
				dprintk(1, KERN_ERR, "Illegal tag len size\n");
				ret = -EINVAL;
				goto out_err;
			}

			if (unlikely(copy_from_user(vhash, caop->tag, caop->tag_len))) {
				ret = -EFAULT;
				goto out_err;
			}

			/* auth_len > 0 is guaranteed by get_userbuf_srtp(). */
			ret = cryptodev_hash_update(&ses_ptr->hdata,
						auth_sg, auth_len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
				goto out_err;
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				goto out_err;
			}

			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
				dprintk(1, KERN_ERR, "MAC verification failed\n");
				ret = -ECANCELED;
				goto out_err;
			}
		}

		if (ses_ptr->cdata.init != 0) {
			if (ses_ptr->cdata.stream == 0) {
				dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode\n");
				ret = -EINVAL;
				goto out_err;
			}

			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
							dst_sg, dst_sg, len);

			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
				goto out_err;
			}
		}
	}

	kcaop->dst_len = len;
	return 0;
out_err:
	return ret;
}
555 /* This is the main crypto function - zero-copy edition */
556 static int
557 __crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
559 struct scatterlist *dst_sg, *auth_sg;
560 struct crypt_auth_op *caop = &kcaop->caop;
561 int ret = 0, pagecount;
563 if (caop->flags & COP_FLAG_AEAD_TLS_TYPE) {
564 ret = get_userbuf_aead(ses_ptr, kcaop, &auth_sg, &dst_sg, &pagecount);
565 if (unlikely(ret)) {
566 dprintk(1, KERN_ERR, "Error getting user pages.\n");
567 return ret;
570 ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
571 dst_sg, caop->len);
572 } else if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
573 ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg, &pagecount);
574 if (unlikely(ret)) {
575 dprintk(1, KERN_ERR, "Error getting user pages.\n");
576 return ret;
579 ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
580 dst_sg, caop->len);
581 } else {
582 dprintk(1, KERN_ERR, "Unsupported flag for authenc\n");
583 return -EINVAL;
586 release_user_pages(ses_ptr->pages, pagecount);
587 return ret;
591 int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop)
593 struct csession *ses_ptr;
594 struct crypt_auth_op *caop = &kcaop->caop;
595 int ret;
597 if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
598 dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", caop->op);
599 return -EINVAL;
602 /* this also enters ses_ptr->sem */
603 ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
604 if (unlikely(!ses_ptr)) {
605 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
606 return -EINVAL;
609 if (unlikely(ses_ptr->cdata.init == 0)) {
610 dprintk(1, KERN_ERR, "cipher context not initialized\n");
611 return -EINVAL;
614 if (ses_ptr->hdata.init != 0) {
615 ret = cryptodev_hash_reset(&ses_ptr->hdata);
616 if (unlikely(ret)) {
617 dprintk(1, KERN_ERR,
618 "error in cryptodev_hash_reset()\n");
619 goto out_unlock;
623 cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
624 min(ses_ptr->cdata.ivsize, kcaop->ivlen));
626 if (likely(caop->len || caop->auth_len)) {
627 ret = __crypto_auth_run_zc(ses_ptr, kcaop);
628 if (unlikely(ret))
629 goto out_unlock;
632 cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
633 min(ses_ptr->cdata.ivsize, kcaop->ivlen));
635 out_unlock:
636 mutex_unlock(&ses_ptr->sem);
637 return ret;