/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * This file handles the AEAD part of /dev/crypto.
 */
29 #include <crypto/hash.h>
30 #include <linux/crypto.h>
32 #include <linux/highmem.h>
33 #include <linux/ioctl.h>
34 #include <linux/random.h>
35 #include <linux/syscalls.h>
36 #include <linux/pagemap.h>
37 #include <linux/poll.h>
38 #include <linux/uaccess.h>
39 #include <crypto/cryptodev.h>
40 #include <crypto/scatterwalk.h>
41 #include <linux/scatterlist.h>
42 #include "cryptodev_int.h"
46 /* make cop->src and cop->dst available in scatterlists */
47 static int get_userbuf_aead(struct csession
*ses
, struct kernel_crypt_auth_op
*kcaop
,
48 struct scatterlist
**auth_sg
, struct scatterlist
**dst_sg
,
51 int dst_pagecount
= 0, pagecount
;
52 int auth_pagecount
= 0;
53 struct crypt_auth_op
*caop
= &kcaop
->caop
;
56 if (caop
->dst
== NULL
&& caop
->auth_src
== NULL
)
60 if (!IS_ALIGNED((unsigned long)caop
->dst
, ses
->alignmask
))
61 dprintk(2, KERN_WARNING
, "%s: careful - source address %lx is not %d byte aligned\n",
62 __func__
, (unsigned long)caop
->dst
, ses
->alignmask
+ 1);
63 if (!IS_ALIGNED((unsigned long)caop
->auth_src
, ses
->alignmask
))
64 dprintk(2, KERN_WARNING
, "%s: careful - source address %lx is not %d byte aligned\n",
65 __func__
, (unsigned long)caop
->auth_src
, ses
->alignmask
+ 1);
68 if (kcaop
->dst_len
== 0) {
69 dprintk(1, KERN_WARNING
, "Destination length cannot be zero\n");
73 if (caop
->auth_len
> 0)
74 auth_pagecount
= PAGECOUNT(caop
->auth_src
, caop
->auth_len
);
76 dst_pagecount
= PAGECOUNT(caop
->dst
, kcaop
->dst_len
);
78 (*tot_pages
) = pagecount
= auth_pagecount
+ dst_pagecount
;
80 rc
= adjust_sg_array(ses
, pagecount
);
84 if (auth_pagecount
> 0) {
85 rc
= __get_userbuf(caop
->auth_src
, caop
->auth_len
, 0, auth_pagecount
,
86 ses
->pages
, ses
->sg
, kcaop
->task
, kcaop
->mm
);
89 "failed to get user pages for data input\n");
93 (*dst_sg
) = ses
->sg
+ auth_pagecount
;
99 rc
= __get_userbuf(caop
->dst
, kcaop
->dst_len
, 1, dst_pagecount
,
100 ses
->pages
+ auth_pagecount
, *dst_sg
, kcaop
->task
, kcaop
->mm
);
102 release_user_pages(ses
->pages
, auth_pagecount
);
104 "failed to get user pages for data input\n");
111 /* Taken from Maxim Levitsky's patch
113 static struct scatterlist
*sg_advance(struct scatterlist
*sg
, int consumed
)
115 while (consumed
>= sg
->length
) {
116 consumed
-= sg
->length
;
123 WARN_ON(!sg
&& consumed
);
128 sg
->offset
+= consumed
;
129 sg
->length
-= consumed
;
131 if (sg
->offset
>= PAGE_SIZE
) {
133 nth_page(sg_page(sg
), sg
->offset
/ PAGE_SIZE
);
134 sg_set_page(sg
, page
, sg
->length
, sg
->offset
% PAGE_SIZE
);
141 * sg_copy - copies sg entries from sg_from to sg_to, such
142 * as sg_to covers first 'len' bytes from sg_from.
144 static int sg_copy(struct scatterlist
*sg_from
, struct scatterlist
*sg_to
, int len
)
146 while (len
> sg_from
->length
) {
147 len
-= sg_from
->length
;
149 sg_set_page(sg_to
, sg_page(sg_from
),
150 sg_from
->length
, sg_from
->offset
);
152 sg_to
= sg_next(sg_to
);
153 sg_from
= sg_next(sg_from
);
155 if (len
&& (!sg_from
|| !sg_to
))
160 sg_set_page(sg_to
, sg_page(sg_from
),
161 len
, sg_from
->offset
);
166 static int get_userbuf_srtp(struct csession
*ses
, struct kernel_crypt_auth_op
*kcaop
,
167 struct scatterlist
**auth_sg
, struct scatterlist
**dst_sg
,
171 int auth_pagecount
= 0;
172 struct crypt_auth_op
*caop
= &kcaop
->caop
;
175 if (caop
->dst
== NULL
&& caop
->auth_src
== NULL
)
178 if (ses
->alignmask
) {
179 if (!IS_ALIGNED((unsigned long)caop
->dst
, ses
->alignmask
))
180 dprintk(2, KERN_WARNING
, "%s: careful - source address %lx is not %d byte aligned\n",
181 __func__
, (unsigned long)caop
->dst
, ses
->alignmask
+ 1);
182 if (!IS_ALIGNED((unsigned long)caop
->auth_src
, ses
->alignmask
))
183 dprintk(2, KERN_WARNING
, "%s: careful - source address %lx is not %d byte aligned\n",
184 __func__
, (unsigned long)caop
->auth_src
, ses
->alignmask
+ 1);
187 if (unlikely(kcaop
->dst_len
== 0 || caop
->auth_len
== 0)) {
188 dprintk(1, KERN_WARNING
, "Destination length cannot be zero\n");
192 /* Note that in SRTP auth data overlap with data to be encrypted (dst)
195 auth_pagecount
= PAGECOUNT(caop
->auth_src
, caop
->auth_len
);
196 diff
= (int)(caop
->src
- caop
->auth_src
);
197 if (diff
> PAGE_SIZE
|| diff
< 0) {
198 dprintk(1, KERN_WARNING
, "auth_src must overlap with src (diff: %d).\n", diff
);
202 (*tot_pages
) = pagecount
= auth_pagecount
;
204 rc
= adjust_sg_array(ses
, pagecount
*2); /* double pages to have pages for dst(=auth_src) */
208 rc
= __get_userbuf(caop
->auth_src
, caop
->auth_len
, 1, auth_pagecount
,
209 ses
->pages
, ses
->sg
, kcaop
->task
, kcaop
->mm
);
212 "failed to get user pages for data input\n");
215 (*auth_sg
) = ses
->sg
;
217 (*dst_sg
) = ses
->sg
+ auth_pagecount
;
218 sg_init_table(*dst_sg
, auth_pagecount
);
219 sg_copy(ses
->sg
, (*dst_sg
), caop
->auth_len
);
220 (*dst_sg
) = sg_advance(*dst_sg
, diff
);
221 if (*dst_sg
== NULL
) {
222 release_user_pages(ses
->pages
, pagecount
);
224 "failed to get enough pages for auth data\n");
231 int copy_from_user_to_user( void* __user dst
, void* __user src
, int len
)
234 int buffer_size
= min(len
, 16*1024);
237 if (len
> buffer_size
) {
239 "The provided buffer is too large\n");
243 buffer
= kmalloc(buffer_size
, GFP_KERNEL
);
247 if (unlikely(copy_from_user(buffer
, src
, len
))) {
252 if (unlikely(copy_to_user(dst
, buffer
, len
))) {
263 static int fill_kcaop_from_caop(struct kernel_crypt_auth_op
*kcaop
, struct fcrypt
*fcr
)
265 struct crypt_auth_op
*caop
= &kcaop
->caop
;
266 struct csession
*ses_ptr
;
269 /* this also enters ses_ptr->sem */
270 ses_ptr
= crypto_get_session_by_sid(fcr
, caop
->ses
);
271 if (unlikely(!ses_ptr
)) {
272 dprintk(1, KERN_ERR
, "invalid session ID=0x%08X\n", caop
->ses
);
276 if (caop
->src
!= caop
->dst
) {
278 "Non-inplace encryption and decryption is not efficient\n");
280 rc
= copy_from_user_to_user( caop
->dst
, caop
->src
, caop
->len
);
287 if (caop
->tag_len
== 0)
288 caop
->tag_len
= ses_ptr
->hdata
.digestsize
;
290 kcaop
->ivlen
= caop
->iv
? ses_ptr
->cdata
.ivsize
: 0;
292 if (caop
->flags
& COP_FLAG_AEAD_TLS_TYPE
)
293 kcaop
->dst_len
= caop
->len
+ ses_ptr
->cdata
.blocksize
/* pad */ + caop
->tag_len
;
295 kcaop
->dst_len
= caop
->len
;
297 kcaop
->task
= current
;
298 kcaop
->mm
= current
->mm
;
301 rc
= copy_from_user(kcaop
->iv
, caop
->iv
, kcaop
->ivlen
);
304 "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
305 kcaop
->ivlen
, rc
, (unsigned long)caop
->iv
);
314 crypto_put_session(ses_ptr
);
319 static int fill_caop_from_kcaop(struct kernel_crypt_auth_op
*kcaop
, struct fcrypt
*fcr
)
323 kcaop
->caop
.len
= kcaop
->dst_len
;
325 if (kcaop
->ivlen
&& kcaop
->caop
.flags
& COP_FLAG_WRITE_IV
) {
326 ret
= copy_to_user(kcaop
->caop
.iv
,
327 kcaop
->iv
, kcaop
->ivlen
);
329 dprintk(1, KERN_ERR
, "Error in copying to userspace\n");
337 int kcaop_from_user(struct kernel_crypt_auth_op
*kcaop
,
338 struct fcrypt
*fcr
, void __user
*arg
)
340 if (unlikely(copy_from_user(&kcaop
->caop
, arg
, sizeof(kcaop
->caop
)))) {
341 dprintk(1, KERN_ERR
, "Error in copying from userspace\n");
345 return fill_kcaop_from_caop(kcaop
, fcr
);
348 int kcaop_to_user(struct kernel_crypt_auth_op
*kcaop
,
349 struct fcrypt
*fcr
, void __user
*arg
)
353 ret
= fill_caop_from_kcaop(kcaop
, fcr
);
355 dprintk(1, KERN_ERR
, "fill_caop_from_kcaop\n");
359 if (unlikely(copy_to_user(arg
, &kcaop
->caop
, sizeof(kcaop
->caop
)))) {
360 dprintk(1, KERN_ERR
, "Error in copying to userspace\n");
/* Writes 'hash_len' bytes of the computed MAC into the scatterlist,
 * immediately after the first 'len' payload bytes (TLS layout). */
static void copy_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}
/* Reads the trailing 'hash_len'-byte MAC out of the scatterlist, i.e.
 * the last hash_len bytes of a 'len'-byte TLS record. */
static void read_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len - hash_len, hash_len, 0);
}
376 static int pad_record( struct scatterlist
*dst_sg
, int len
, int block_size
)
378 uint8_t pad
[block_size
];
379 int pad_size
= block_size
- (len
% block_size
);
381 memset(pad
, pad_size
-1, pad_size
);
383 scatterwalk_map_and_copy(pad
, dst_sg
, len
, pad_size
, 1);
388 static int verify_tls_record_pad( struct scatterlist
*dst_sg
, int len
, int block_size
)
390 uint8_t pad
[256]; /* the maximum allowed */
394 scatterwalk_map_and_copy(&pad_size
, dst_sg
, len
-1, 1, 0);
396 if (pad_size
+1 > len
) {
397 dprintk(1, KERN_ERR
, "Pad size: %d\n", pad_size
);
401 scatterwalk_map_and_copy(pad
, dst_sg
, len
-pad_size
-1, pad_size
+1, 0);
403 for (i
=0;i
<pad_size
;i
++)
404 if (pad
[i
] != pad_size
) {
405 dprintk(1, KERN_ERR
, "Pad size: %d, pad: %d\n", pad_size
, (int)pad
[i
]);
413 tls_auth_n_crypt(struct csession
*ses_ptr
, struct kernel_crypt_auth_op
*kcaop
,
414 struct scatterlist
*auth_sg
, uint32_t auth_len
,
415 struct scatterlist
*dst_sg
, uint32_t len
)
418 struct crypt_auth_op
*caop
= &kcaop
->caop
;
419 uint8_t vhash
[AALG_MAX_RESULT_LEN
];
420 uint8_t hash_output
[AALG_MAX_RESULT_LEN
];
422 /* TLS authenticates the plaintext except for the padding.
424 if (caop
->op
== COP_ENCRYPT
) {
425 if (ses_ptr
->hdata
.init
!= 0) {
427 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
430 dprintk(0, KERN_ERR
, "cryptodev_hash_update: %d\n", ret
);
436 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
439 dprintk(0, KERN_ERR
, "cryptodev_hash_update: %d\n", ret
);
444 ret
= cryptodev_hash_final(&ses_ptr
->hdata
, hash_output
);
446 dprintk(0, KERN_ERR
, "cryptodev_hash_final: %d\n", ret
);
450 copy_tls_hash( dst_sg
, len
, hash_output
, caop
->tag_len
);
451 len
+= caop
->tag_len
;
454 if (ses_ptr
->cdata
.init
!= 0) {
455 if (ses_ptr
->cdata
.blocksize
> 1) {
456 ret
= pad_record(dst_sg
, len
, ses_ptr
->cdata
.blocksize
);
460 ret
= cryptodev_cipher_encrypt(&ses_ptr
->cdata
,
461 dst_sg
, dst_sg
, len
);
463 dprintk(0, KERN_ERR
, "cryptodev_cipher_encrypt: %d\n", ret
);
468 if (ses_ptr
->cdata
.init
!= 0) {
469 ret
= cryptodev_cipher_decrypt(&ses_ptr
->cdata
,
470 dst_sg
, dst_sg
, len
);
473 dprintk(0, KERN_ERR
, "cryptodev_cipher_decrypt: %d\n", ret
);
477 if (ses_ptr
->cdata
.blocksize
> 1) {
478 ret
= verify_tls_record_pad(dst_sg
, len
, ses_ptr
->cdata
.blocksize
);
479 if (unlikely(ret
< 0)) {
480 dprintk(0, KERN_ERR
, "verify_record_pad: %d\n", ret
);
488 if (ses_ptr
->hdata
.init
!= 0) {
489 if (unlikely(caop
->tag_len
> sizeof(vhash
) || caop
->tag_len
> len
)) {
490 dprintk(1, KERN_ERR
, "Illegal tag len size\n");
495 read_tls_hash( dst_sg
, len
, vhash
, caop
->tag_len
);
496 len
-= caop
->tag_len
;
499 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
502 dprintk(0, KERN_ERR
, "cryptodev_hash_update: %d\n", ret
);
508 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
511 dprintk(0, KERN_ERR
, "cryptodev_hash_update: %d\n", ret
);
516 ret
= cryptodev_hash_final(&ses_ptr
->hdata
, hash_output
);
518 dprintk(0, KERN_ERR
, "cryptodev_hash_final: %d\n", ret
);
522 if (memcmp(vhash
, hash_output
, caop
->tag_len
) != 0 || fail
!= 0) {
523 dprintk(1, KERN_ERR
, "MAC verification failed (tag_len: %d)\n", caop
->tag_len
);
529 kcaop
->dst_len
= len
;
536 srtp_auth_n_crypt(struct csession
*ses_ptr
, struct kernel_crypt_auth_op
*kcaop
,
537 struct scatterlist
*auth_sg
, uint32_t auth_len
,
538 struct scatterlist
*dst_sg
, uint32_t len
)
541 struct crypt_auth_op
*caop
= &kcaop
->caop
;
542 uint8_t vhash
[AALG_MAX_RESULT_LEN
];
543 uint8_t hash_output
[AALG_MAX_RESULT_LEN
];
545 /* SRTP authenticates the encrypted data.
547 if (caop
->op
== COP_ENCRYPT
) {
548 if (ses_ptr
->cdata
.init
!= 0) {
549 if (ses_ptr
->cdata
.stream
== 0) {
550 dprintk(0, KERN_ERR
, "Only stream modes are allowed in SRTP mode\n");
555 ret
= cryptodev_cipher_encrypt(&ses_ptr
->cdata
,
556 dst_sg
, dst_sg
, len
);
558 dprintk(0, KERN_ERR
, "cryptodev_cipher_encrypt: %d\n", ret
);
563 if (ses_ptr
->hdata
.init
!= 0) {
565 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
568 dprintk(0, KERN_ERR
, "cryptodev_hash_update: %d\n", ret
);
573 ret
= cryptodev_hash_final(&ses_ptr
->hdata
, hash_output
);
575 dprintk(0, KERN_ERR
, "cryptodev_hash_final: %d\n", ret
);
579 if (unlikely(copy_to_user(caop
->tag
, hash_output
, caop
->tag_len
))) {
586 if (ses_ptr
->hdata
.init
!= 0) {
587 if (unlikely(caop
->tag_len
> sizeof(vhash
) || caop
->tag_len
> len
)) {
588 dprintk(1, KERN_ERR
, "Illegal tag len size\n");
593 if (unlikely(copy_from_user(vhash
, caop
->tag
, caop
->tag_len
))) {
598 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
601 dprintk(0, KERN_ERR
, "cryptodev_hash_update: %d\n", ret
);
605 ret
= cryptodev_hash_final(&ses_ptr
->hdata
, hash_output
);
607 dprintk(0, KERN_ERR
, "cryptodev_hash_final: %d\n", ret
);
611 if (memcmp(vhash
, hash_output
, caop
->tag_len
) != 0 || fail
!= 0) {
612 dprintk(1, KERN_ERR
, "MAC verification failed\n");
618 if (ses_ptr
->cdata
.init
!= 0) {
619 if (ses_ptr
->cdata
.stream
== 0) {
620 dprintk(0, KERN_ERR
, "Only stream modes are allowed in SRTP mode\n");
625 ret
= cryptodev_cipher_decrypt(&ses_ptr
->cdata
,
626 dst_sg
, dst_sg
, len
);
629 dprintk(0, KERN_ERR
, "cryptodev_cipher_decrypt: %d\n", ret
);
635 kcaop
->dst_len
= len
;
641 /* This is the main crypto function - zero-copy edition */
643 __crypto_auth_run_zc(struct csession
*ses_ptr
, struct kernel_crypt_auth_op
*kcaop
)
645 struct scatterlist
*dst_sg
, *auth_sg
;
646 struct crypt_auth_op
*caop
= &kcaop
->caop
;
647 int ret
= 0, pagecount
;
649 if (caop
->flags
& COP_FLAG_AEAD_TLS_TYPE
) {
650 ret
= get_userbuf_aead(ses_ptr
, kcaop
, &auth_sg
, &dst_sg
, &pagecount
);
652 dprintk(1, KERN_ERR
, "Error getting user pages.\n");
656 ret
= tls_auth_n_crypt(ses_ptr
, kcaop
, auth_sg
, caop
->auth_len
,
658 } else if (caop
->flags
& COP_FLAG_AEAD_SRTP_TYPE
) {
659 ret
= get_userbuf_srtp(ses_ptr
, kcaop
, &auth_sg
, &dst_sg
, &pagecount
);
661 dprintk(1, KERN_ERR
, "Error getting user pages.\n");
665 ret
= srtp_auth_n_crypt(ses_ptr
, kcaop
, auth_sg
, caop
->auth_len
,
668 dprintk(1, KERN_ERR
, "Unsupported flag for authenc\n");
672 release_user_pages(ses_ptr
->pages
, pagecount
);
677 int crypto_auth_run(struct fcrypt
*fcr
, struct kernel_crypt_auth_op
*kcaop
)
679 struct csession
*ses_ptr
;
680 struct crypt_auth_op
*caop
= &kcaop
->caop
;
683 if (unlikely(caop
->op
!= COP_ENCRYPT
&& caop
->op
!= COP_DECRYPT
)) {
684 dprintk(1, KERN_DEBUG
, "invalid operation op=%u\n", caop
->op
);
688 /* this also enters ses_ptr->sem */
689 ses_ptr
= crypto_get_session_by_sid(fcr
, caop
->ses
);
690 if (unlikely(!ses_ptr
)) {
691 dprintk(1, KERN_ERR
, "invalid session ID=0x%08X\n", caop
->ses
);
695 if (unlikely(ses_ptr
->cdata
.init
== 0)) {
696 dprintk(1, KERN_ERR
, "cipher context not initialized\n");
700 if (ses_ptr
->hdata
.init
!= 0) {
701 ret
= cryptodev_hash_reset(&ses_ptr
->hdata
);
704 "error in cryptodev_hash_reset()\n");
709 cryptodev_cipher_set_iv(&ses_ptr
->cdata
, kcaop
->iv
,
710 min(ses_ptr
->cdata
.ivsize
, kcaop
->ivlen
));
712 if (likely(caop
->len
|| caop
->auth_len
)) {
713 ret
= __crypto_auth_run_zc(ses_ptr
, kcaop
);
718 cryptodev_cipher_get_iv(&ses_ptr
->cdata
, kcaop
->iv
,
719 min(ses_ptr
->cdata
.ivsize
, kcaop
->ivlen
));
722 mutex_unlock(&ses_ptr
->sem
);