/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * This file handles the AEAD part of /dev/crypto.
 */
29 #include <crypto/hash.h>
30 #include <linux/crypto.h>
32 #include <linux/highmem.h>
33 #include <linux/ioctl.h>
34 #include <linux/random.h>
35 #include <linux/syscalls.h>
36 #include <linux/pagemap.h>
37 #include <linux/poll.h>
38 #include <linux/uaccess.h>
39 #include <crypto/cryptodev.h>
40 #include <crypto/scatterwalk.h>
41 #include <linux/scatterlist.h>
42 #include "cryptodev_int.h"
46 /* make cop->src and cop->dst available in scatterlists */
/*
 * get_userbuf_aead() - pin the user-space caop->auth_src and caop->dst
 * buffers and expose them as kernel scatterlists (*auth_sg, *dst_sg) for
 * the zero-copy TLS-type AEAD path.
 *
 * NOTE(review): this block is a garbled extraction.  The leading numbers
 * below are the ORIGINAL file's line numbers; gaps in that numbering
 * (49-50, 57-59, 66-67, 70-72, 75, 77, 79, 81-83, 87-88, 90-92, 94-98,
 * 101, 103, 105-110) mark lines the extraction dropped — mostly braces,
 * `return -EINVAL/-ENOMEM` error paths and the trailing `return 0; }`.
 * The visible code is kept byte-identical; do not treat it as compilable.
 */
47 static int get_userbuf_aead(struct csession
*ses
, struct kernel_crypt_auth_op
*kcaop
,
48 struct scatterlist
**auth_sg
, struct scatterlist
**dst_sg
,
/* page counters for the two user buffers; caop aliases the user request */
51 int dst_pagecount
= 0, pagecount
;
52 int auth_pagecount
= 0;
53 struct crypt_auth_op
*caop
= &kcaop
->caop
;
/* nothing to map when both user pointers are absent (error path dropped) */
56 if (caop
->dst
== NULL
&& caop
->auth_src
== NULL
)
/* warn (debug level 2) when user addresses ignore the cipher alignmask */
60 if (!IS_ALIGNED((unsigned long)caop
->dst
, ses
->alignmask
))
61 dprintk(2, KERN_WARNING
, "%s: careful - source address %lx is not %d byte aligned\n",
62 __func__
, (unsigned long)caop
->dst
, ses
->alignmask
+ 1);
63 if (!IS_ALIGNED((unsigned long)caop
->auth_src
, ses
->alignmask
))
64 dprintk(2, KERN_WARNING
, "%s: careful - source address %lx is not %d byte aligned\n",
65 __func__
, (unsigned long)caop
->auth_src
, ses
->alignmask
+ 1);
/* dst must hold at least ciphertext + tag, so zero length is rejected */
68 if (kcaop
->dst_len
== 0) {
69 dprintk(1, KERN_WARNING
, "Destination length cannot be zero\n");
/* count pages spanned by the (optional) auth data and by dst */
73 if (caop
->auth_len
> 0)
74 auth_pagecount
= PAGECOUNT(caop
->auth_src
, caop
->auth_len
);
76 dst_pagecount
= PAGECOUNT(caop
->dst
, kcaop
->dst_len
);
/* report total pinned pages to the caller and grow ses->sg/->pages */
78 (*tot_pages
) = pagecount
= auth_pagecount
+ dst_pagecount
;
80 rc
= adjust_sg_array(ses
, pagecount
);
/* pin auth pages first (3rd arg 0 — presumably read-only; confirm
 * against __get_userbuf), filling the head of ses->sg */
84 if (auth_pagecount
> 0) {
85 rc
= __get_userbuf(caop
->auth_src
, caop
->auth_len
, 0, auth_pagecount
,
86 ses
->pages
, ses
->sg
, kcaop
->task
, kcaop
->mm
);
89 "failed to get user pages for data input\n");
/* dst scatterlist entries follow the auth entries in ses->sg */
93 (*dst_sg
) = ses
->sg
+ auth_pagecount
;
/* pin dst (3rd arg 1 — presumably writable); on failure the already
 * pinned auth pages are released before the (dropped) error return */
99 rc
= __get_userbuf(caop
->dst
, kcaop
->dst_len
, 1, dst_pagecount
,
100 ses
->pages
+ auth_pagecount
, *dst_sg
, kcaop
->task
, kcaop
->mm
);
102 release_user_pages(ses
->pages
, auth_pagecount
);
104 "failed to get user pages for data input\n");
/*
 * get_userbuf_srtp() - pin the user buffers for the SRTP path, where the
 * region to encrypt (caop->src/dst) OVERLAPS the authenticated region
 * (caop->auth_src).  Only one buffer is pinned; *dst_sg is a second
 * scatterlist entry (ses->sg2) shifted by the src-auth_src offset.
 *
 * NOTE(review): garbled extraction — embedded numbers are original line
 * numbers; gaps (113-115, 118-119, 121-122, 130-131, 134-136, 138-139,
 * 144-146, 148, 150-152, 155-156, 158-159, 161, 165+) are dropped lines:
 * braces, declarations of diff/rc/pagecount, error returns, `return 0; }`.
 * Visible code kept byte-identical.
 */
111 static int get_userbuf_srtp(struct csession
*ses
, struct kernel_crypt_auth_op
*kcaop
,
112 struct scatterlist
**auth_sg
, struct scatterlist
**dst_sg
,
116 int auth_pagecount
= 0;
117 struct crypt_auth_op
*caop
= &kcaop
->caop
;
/* nothing to map when both user pointers are absent */
120 if (caop
->dst
== NULL
&& caop
->auth_src
== NULL
)
/* alignment warnings only when the cipher declares an alignmask */
123 if (ses
->alignmask
) {
124 if (!IS_ALIGNED((unsigned long)caop
->dst
, ses
->alignmask
))
125 dprintk(2, KERN_WARNING
, "%s: careful - source address %lx is not %d byte aligned\n",
126 __func__
, (unsigned long)caop
->dst
, ses
->alignmask
+ 1);
127 if (!IS_ALIGNED((unsigned long)caop
->auth_src
, ses
->alignmask
))
128 dprintk(2, KERN_WARNING
, "%s: careful - source address %lx is not %d byte aligned\n",
129 __func__
, (unsigned long)caop
->auth_src
, ses
->alignmask
+ 1);
/* SRTP needs both a non-empty destination and non-empty auth data */
132 if (unlikely(kcaop
->dst_len
== 0 || caop
->auth_len
== 0)) {
133 dprintk(1, KERN_WARNING
, "Destination length cannot be zero\n");
137 /* Note that in SRTP auth data overlap with data to be encrypted (dst)
/* one pinned region covers everything: it spans the auth data */
140 auth_pagecount
= PAGECOUNT(caop
->auth_src
, caop
->auth_len
);
/* byte offset of the encrypted region inside the authenticated one */
141 diff
= (int)(caop
->src
- caop
->auth_src
);
/* src must sit inside (and near the start of) auth_src */
142 if (diff
> 256 || diff
< 0) {
143 dprintk(1, KERN_WARNING
, "auth_src must overlap with src (diff: %d).\n", diff
);
147 (*tot_pages
) = pagecount
= auth_pagecount
;
149 rc
= adjust_sg_array(ses
, pagecount
);
/* pin the shared region (3rd arg 1 — presumably writable, since the
 * cipher writes in place; confirm against __get_userbuf) */
153 rc
= __get_userbuf(caop
->auth_src
, caop
->auth_len
, 1, auth_pagecount
,
154 ses
->pages
, ses
->sg
, kcaop
->task
, kcaop
->mm
);
157 "failed to get user pages for data input\n");
160 (*auth_sg
) = ses
->sg
;
/* dst view = clone of the first sg entry shifted by diff bytes */
162 memcpy(&ses
->sg2
, ses
->sg
, sizeof(ses
->sg
[0]));
163 ses
->sg2
.offset
+= diff
;
164 (*dst_sg
) = &ses
->sg2
;
/*
 * fill_kcaop_from_caop() - populate the kernel-side request (kcaop) from
 * the user-visible crypt_auth_op: resolve the session, default the tag
 * length to the hash digest size, compute the expected output length
 * (plaintext + block padding + tag for TLS), record task/mm for later
 * page pinning, and copy the IV in from user space.
 *
 * NOTE(review): garbled extraction — embedded numbers are original line
 * numbers; gaps (171, 174-175, 180-182, 184, 187, 189-192, 194-197,
 * 199-204, 207, 209, 212, 214, 217-218, 220-221, 224-231, 233+) are
 * dropped lines: braces, `rc` declaration, goto/return error paths and
 * the final `return 0; }`.  Visible code kept byte-identical.
 */
170 static int fill_kcaop_from_caop(struct kernel_crypt_auth_op
*kcaop
, struct fcrypt
*fcr
)
172 struct crypt_auth_op
*caop
= &kcaop
->caop
;
173 struct csession
*ses_ptr
;
176 /* this also enters ses_ptr->sem */
177 ses_ptr
= crypto_get_session_by_sid(fcr
, caop
->ses
);
178 if (unlikely(!ses_ptr
)) {
179 dprintk(1, KERN_ERR
, "invalid session ID=0x%08X\n", caop
->ses
);
/* non-inplace requests are emulated by bouncing src through the
 * session buffer into dst, so later stages can work in place */
183 if (caop
->src
!= caop
->dst
) {
185 "Non-inplace encryption and decryption is not efficient\n");
186 if (caop
->len
> sizeof(ses_ptr
->buffer
)) {
188 "The provided buffer is too large\n");
193 if (unlikely(copy_from_user(ses_ptr
->buffer
, caop
->src
, caop
->len
))) {
198 if (unlikely(copy_to_user(caop
->dst
, ses_ptr
->buffer
, caop
->len
))) {
/* tag_len == 0 means "use the hash algorithm's full digest size" */
205 if (caop
->tag_len
== 0)
206 caop
->tag_len
= ses_ptr
->hdata
.digestsize
;
/* only fetch an IV if the user supplied one */
208 kcaop
->ivlen
= caop
->iv
? ses_ptr
->cdata
.ivsize
: 0;
/* TLS output grows by up to one cipher block of padding plus the tag */
210 if (caop
->flags
& COP_FLAG_AEAD_TLS_TYPE
)
211 kcaop
->dst_len
= caop
->len
+ ses_ptr
->cdata
.blocksize
/* pad */ + caop
->tag_len
;
213 kcaop
->dst_len
= caop
->len
;
/* remember the submitting task/mm so pages can be pinned later */
215 kcaop
->task
= current
;
216 kcaop
->mm
= current
->mm
;
219 rc
= copy_from_user(kcaop
->iv
, caop
->iv
, kcaop
->ivlen
);
222 "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
223 kcaop
->ivlen
, rc
, (unsigned long)caop
->iv
);
/* drops the reference/semaphore taken by crypto_get_session_by_sid */
232 crypto_put_session(ses_ptr
);
237 static int fill_caop_from_kcaop(struct kernel_crypt_auth_op
*kcaop
, struct fcrypt
*fcr
)
241 kcaop
->caop
.len
= kcaop
->dst_len
;
243 if (kcaop
->ivlen
&& kcaop
->caop
.flags
& COP_FLAG_WRITE_IV
) {
244 ret
= copy_to_user(kcaop
->caop
.iv
,
245 kcaop
->iv
, kcaop
->ivlen
);
247 dprintk(1, KERN_ERR
, "Error in copying to userspace\n");
255 int kcaop_from_user(struct kernel_crypt_auth_op
*kcaop
,
256 struct fcrypt
*fcr
, void __user
*arg
)
258 if (unlikely(copy_from_user(&kcaop
->caop
, arg
, sizeof(kcaop
->caop
)))) {
259 dprintk(1, KERN_ERR
, "Error in copying from userspace\n");
263 return fill_kcaop_from_caop(kcaop
, fcr
);
266 int kcaop_to_user(struct kernel_crypt_auth_op
*kcaop
,
267 struct fcrypt
*fcr
, void __user
*arg
)
271 ret
= fill_caop_from_kcaop(kcaop
, fcr
);
273 dprintk(1, KERN_ERR
, "fill_caop_from_kcaop\n");
277 if (unlikely(copy_to_user(arg
, &kcaop
->caop
, sizeof(kcaop
->caop
)))) {
278 dprintk(1, KERN_ERR
, "Error in copying to userspace\n");
/* Append the computed MAC: writes hash_len bytes from hash into the
 * scatterlist immediately after the payload (at offset len; final
 * argument 1 selects the "copy into sg" direction). */
static void copy_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}
/* Extract the trailing MAC: reads the last hash_len bytes of the region
 * ending at offset len out of the scatterlist into hash (final argument
 * 0 selects the "copy out of sg" direction). */
static void read_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len - hash_len, hash_len, 0);
}
294 static int pad_record( struct scatterlist
*dst_sg
, int len
, int block_size
)
296 uint8_t pad
[block_size
];
297 int pad_size
= block_size
- (len
% block_size
);
299 memset(pad
, pad_size
-1, pad_size
);
301 scatterwalk_map_and_copy(pad
, dst_sg
, len
, pad_size
, 1);
/*
 * verify_record_pad() - check TLS-style padding at the tail of a record:
 * the last byte holds the pad value, and each of the preceding pad bytes
 * must equal it.
 *
 * NOTE(review): garbled extraction — embedded numbers are original line
 * numbers; gaps (307, 309-311, 313-317, 319, 322+) are dropped lines:
 * the opening brace, declarations of pad_size and i, a range check on
 * pad_size vs len, the mismatch error return and the final return.
 * Visible code kept byte-identical.
 */
306 static int verify_record_pad( struct scatterlist
*dst_sg
, int len
, int block_size
)
308 uint8_t pad
[256]; /* the maximum allowed */
/* read the last byte of the record: the advertised pad value */
312 scatterwalk_map_and_copy(&pad_size
, dst_sg
, len
-1, 1, 0);
/* re-read the pad_size bytes ending at len for verification */
318 scatterwalk_map_and_copy(pad
, dst_sg
, len
-pad_size
, pad_size
, 0);
/* every pad byte must equal the advertised pad value */
320 for (i
=0;i
<pad_size
;i
++)
321 if (pad
[i
] != pad_size
)
/*
 * tls_auth_n_crypt() - TLS-style authenticate-then-encrypt over pinned
 * scatterlists.  ENCRYPT: MAC the auth data and the plaintext, append
 * the tag, add block padding, then encrypt in place.  DECRYPT: decrypt
 * in place, verify padding, strip and verify the trailing MAC.  The
 * final data length is written back to kcaop->dst_len.
 *
 * NOTE(review): garbled extraction — embedded numbers are original line
 * numbers; gaps mark dropped lines: the `static int` return-type line,
 * the declaration of `ret` and `fail` (fail is read at original line
 * 436), the sg/len arguments of the cryptodev_hash_update calls, braces,
 * error returns, the `} else {` between encrypt and decrypt branches,
 * and the final `return 0; }`.  Visible code kept byte-identical.
 */
328 tls_auth_n_crypt(struct csession
*ses_ptr
, struct kernel_crypt_auth_op
*kcaop
,
329 struct scatterlist
*auth_sg
, uint32_t auth_len
,
330 struct scatterlist
*dst_sg
, uint32_t len
)
333 struct crypt_auth_op
*caop
= &kcaop
->caop
;
/* vhash: tag received from the peer; hash_output: locally computed tag */
334 uint8_t vhash
[AALG_MAX_RESULT_LEN
];
335 uint8_t hash_output
[AALG_MAX_RESULT_LEN
];
337 /* TLS authenticates the plaintext except for the padding.
/* ---- encrypt: MAC plaintext, append tag, pad, then encrypt ---- */
339 if (caop
->op
== COP_ENCRYPT
) {
340 if (ses_ptr
->hdata
.init
!= 0) {
/* hash the additional authenticated data (dropped args: auth_sg/auth_len) */
342 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
345 dprintk(0, KERN_ERR
, "cryptodev_hash_update: %d\n", ret
);
/* hash the plaintext (dropped args: dst_sg/len) */
351 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
354 dprintk(0, KERN_ERR
, "cryptodev_hash_update: %d\n", ret
);
359 ret
= cryptodev_hash_final(&ses_ptr
->hdata
, hash_output
);
361 dprintk(0, KERN_ERR
, "cryptodev_hash_final: %d\n", ret
);
/* append the tag and account for it in the record length */
365 copy_hash( dst_sg
, len
, hash_output
, caop
->tag_len
);
366 len
+= caop
->tag_len
;
/* block ciphers get TLS padding before encryption */
369 if (ses_ptr
->cdata
.init
!= 0) {
370 if (ses_ptr
->cdata
.blocksize
> 1) {
371 ret
= pad_record(dst_sg
, len
, ses_ptr
->cdata
.blocksize
);
375 ret
= cryptodev_cipher_encrypt(&ses_ptr
->cdata
,
376 dst_sg
, dst_sg
, len
);
378 dprintk(0, KERN_ERR
, "cryptodev_cipher_encrypt: %d\n", ret
);
/* ---- decrypt: decrypt, strip padding, then verify the MAC ---- */
383 if (ses_ptr
->cdata
.init
!= 0) {
384 ret
= cryptodev_cipher_decrypt(&ses_ptr
->cdata
,
385 dst_sg
, dst_sg
, len
);
388 dprintk(0, KERN_ERR
, "cryptodev_cipher_decrypt: %d\n", ret
);
392 if (ses_ptr
->cdata
.blocksize
> 1) {
393 ret
= verify_record_pad(dst_sg
, len
, ses_ptr
->cdata
.blocksize
);
395 dprintk(0, KERN_ERR
, "verify_record_pad: %d\n", ret
);
402 if (ses_ptr
->hdata
.init
!= 0) {
/* sanity-check the tag length against the buffer and the record */
403 if (unlikely(caop
->tag_len
> sizeof(vhash
) || caop
->tag_len
> len
)) {
404 dprintk(1, KERN_ERR
, "Illegal tag len size\n");
/* split the trailing tag off the record */
409 read_hash( dst_sg
, len
, vhash
, caop
->tag_len
);
410 len
-= caop
->tag_len
;
/* recompute the MAC over auth data + plaintext (args dropped) */
413 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
416 dprintk(0, KERN_ERR
, "cryptodev_hash_update: %d\n", ret
);
422 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
425 dprintk(0, KERN_ERR
, "cryptodev_hash_update: %d\n", ret
);
430 ret
= cryptodev_hash_final(&ses_ptr
->hdata
, hash_output
);
432 dprintk(0, KERN_ERR
, "cryptodev_hash_final: %d\n", ret
);
/* reject on tag mismatch or any earlier recorded failure (fail) */
436 if (memcmp(vhash
, hash_output
, caop
->tag_len
) != 0 || fail
!= 0) {
437 dprintk(1, KERN_ERR
, "MAC verification failed\n");
/* report the final (tag/pad adjusted) length to the caller */
443 kcaop
->dst_len
= len
;
/*
 * srtp_auth_n_crypt() - SRTP-style encrypt-then-authenticate over pinned
 * scatterlists.  ENCRYPT: encrypt in place (stream ciphers only), MAC
 * the ciphertext, copy the tag out to caop->tag.  DECRYPT: copy the tag
 * in from caop->tag, recompute and compare the MAC, then decrypt in
 * place.  The final length is written back to kcaop->dst_len.
 *
 * NOTE(review): garbled extraction — embedded numbers are original line
 * numbers; gaps mark dropped lines: the `static int` return-type line,
 * the `ret`/`fail` declarations (fail is read at original line 525),
 * hash_update arguments (presumably auth_sg/auth_len), braces, error
 * returns, the `} else {` between branches and the trailing
 * `return 0; }`.  Visible code kept byte-identical.
 */
450 srtp_auth_n_crypt(struct csession
*ses_ptr
, struct kernel_crypt_auth_op
*kcaop
,
451 struct scatterlist
*auth_sg
, uint32_t auth_len
,
452 struct scatterlist
*dst_sg
, uint32_t len
)
455 struct crypt_auth_op
*caop
= &kcaop
->caop
;
/* vhash: tag from user space; hash_output: locally computed tag */
456 uint8_t vhash
[AALG_MAX_RESULT_LEN
];
457 uint8_t hash_output
[AALG_MAX_RESULT_LEN
];
459 /* SRTP authenticates the encrypted data.
/* ---- encrypt, then authenticate the ciphertext ---- */
461 if (caop
->op
== COP_ENCRYPT
) {
462 if (ses_ptr
->cdata
.init
!= 0) {
/* SRTP requires a stream cipher (no padding, 1:1 length) */
463 if (ses_ptr
->cdata
.stream
== 0) {
464 dprintk(0, KERN_ERR
, "Only stream modes are allowed in SRTP mode\n");
469 ret
= cryptodev_cipher_encrypt(&ses_ptr
->cdata
,
470 dst_sg
, dst_sg
, len
);
472 dprintk(0, KERN_ERR
, "cryptodev_cipher_encrypt: %d\n", ret
);
477 if (ses_ptr
->hdata
.init
!= 0) {
/* MAC the (now encrypted) auth region (args dropped by extraction) */
479 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
482 dprintk(0, KERN_ERR
, "cryptodev_hash_update: %d\n", ret
);
487 ret
= cryptodev_hash_final(&ses_ptr
->hdata
, hash_output
);
489 dprintk(0, KERN_ERR
, "cryptodev_hash_final: %d\n", ret
);
/* the tag goes straight back to the user-supplied caop->tag buffer */
493 if (unlikely(copy_to_user(caop
->tag
, hash_output
, caop
->tag_len
))) {
/* ---- verify the tag over ciphertext, then decrypt ---- */
500 if (ses_ptr
->hdata
.init
!= 0) {
501 if (unlikely(caop
->tag_len
> sizeof(vhash
) || caop
->tag_len
> len
)) {
502 dprintk(1, KERN_ERR
, "Illegal tag len size\n");
/* fetch the expected tag from user space */
507 if (unlikely(copy_from_user(vhash
, caop
->tag
, caop
->tag_len
))) {
512 ret
= cryptodev_hash_update(&ses_ptr
->hdata
,
515 dprintk(0, KERN_ERR
, "cryptodev_hash_update: %d\n", ret
);
519 ret
= cryptodev_hash_final(&ses_ptr
->hdata
, hash_output
);
521 dprintk(0, KERN_ERR
, "cryptodev_hash_final: %d\n", ret
);
/* reject on tag mismatch or any earlier recorded failure (fail) */
525 if (memcmp(vhash
, hash_output
, caop
->tag_len
) != 0 || fail
!= 0) {
526 dprintk(1, KERN_ERR
, "MAC verification failed\n");
532 if (ses_ptr
->cdata
.init
!= 0) {
533 if (ses_ptr
->cdata
.stream
== 0) {
534 dprintk(0, KERN_ERR
, "Only stream modes are allowed in SRTP mode\n");
539 ret
= cryptodev_cipher_decrypt(&ses_ptr
->cdata
,
540 dst_sg
, dst_sg
, len
);
543 dprintk(0, KERN_ERR
, "cryptodev_cipher_decrypt: %d\n", ret
);
/* report the final length to the caller */
549 kcaop
->dst_len
= len
;
555 /* This is the main crypto function - zero-copy edition */
/*
 * __crypto_auth_run_zc() - dispatch on caop->flags: map the user buffers
 * with get_userbuf_aead() (TLS) or get_userbuf_srtp() (SRTP), run the
 * corresponding auth+crypt routine, then release the pinned pages.
 *
 * NOTE(review): garbled extraction — the `static int` return-type line
 * (original 556) and several lines are missing: braces, error returns,
 * the trailing dst_sg/len arguments of the tls/srtp calls, the `else`
 * for the unsupported-flag path, and the final `return ret; }`.
 * Visible code kept byte-identical.
 */
557 __crypto_auth_run_zc(struct csession
*ses_ptr
, struct kernel_crypt_auth_op
*kcaop
)
559 struct scatterlist
*dst_sg
, *auth_sg
;
560 struct crypt_auth_op
*caop
= &kcaop
->caop
;
561 int ret
= 0, pagecount
;
/* TLS-type record: separate auth and dst buffers */
563 if (caop
->flags
& COP_FLAG_AEAD_TLS_TYPE
) {
564 ret
= get_userbuf_aead(ses_ptr
, kcaop
, &auth_sg
, &dst_sg
, &pagecount
);
566 dprintk(1, KERN_ERR
, "Error getting user pages.\n");
570 ret
= tls_auth_n_crypt(ses_ptr
, kcaop
, auth_sg
, caop
->auth_len
,
/* SRTP-type record: auth region overlaps the encrypted region */
572 } else if (caop
->flags
& COP_FLAG_AEAD_SRTP_TYPE
) {
573 ret
= get_userbuf_srtp(ses_ptr
, kcaop
, &auth_sg
, &dst_sg
, &pagecount
);
575 dprintk(1, KERN_ERR
, "Error getting user pages.\n");
579 ret
= srtp_auth_n_crypt(ses_ptr
, kcaop
, auth_sg
, caop
->auth_len
,
582 dprintk(1, KERN_ERR
, "Unsupported flag for authenc\n");
/* unpin everything mapped by the get_userbuf_* helpers */
586 release_user_pages(ses_ptr
->pages
, pagecount
);
/*
 * crypto_auth_run() - entry point for an AEAD operation: validate the
 * opcode, look up and lock the session, require an initialized cipher,
 * reset the hash state, load the IV, run the zero-copy path, then read
 * the updated IV back and drop the session lock.
 *
 * NOTE(review): garbled extraction — embedded numbers are original line
 * numbers; gaps mark dropped lines (braces, `int ret;`, error returns
 * with `goto out_unlock`-style cleanup).  The function is also truncated
 * at the end of this view: the original continues past line 636
 * (presumably `return ret; }`).  Visible code kept byte-identical.
 */
591 int crypto_auth_run(struct fcrypt
*fcr
, struct kernel_crypt_auth_op
*kcaop
)
593 struct csession
*ses_ptr
;
594 struct crypt_auth_op
*caop
= &kcaop
->caop
;
/* only plain encrypt/decrypt opcodes are valid here */
597 if (unlikely(caop
->op
!= COP_ENCRYPT
&& caop
->op
!= COP_DECRYPT
)) {
598 dprintk(1, KERN_DEBUG
, "invalid operation op=%u\n", caop
->op
);
602 /* this also enters ses_ptr->sem */
603 ses_ptr
= crypto_get_session_by_sid(fcr
, caop
->ses
);
604 if (unlikely(!ses_ptr
)) {
605 dprintk(1, KERN_ERR
, "invalid session ID=0x%08X\n", caop
->ses
);
/* AEAD needs a cipher; a hash-only session cannot be used */
609 if (unlikely(ses_ptr
->cdata
.init
== 0)) {
610 dprintk(1, KERN_ERR
, "cipher context not initialized\n");
/* fresh MAC state for every request on this session */
614 if (ses_ptr
->hdata
.init
!= 0) {
615 ret
= cryptodev_hash_reset(&ses_ptr
->hdata
);
618 "error in cryptodev_hash_reset()\n");
/* load the caller's IV, clamped to the cipher's IV size */
623 cryptodev_cipher_set_iv(&ses_ptr
->cdata
, kcaop
->iv
,
624 min(ses_ptr
->cdata
.ivsize
, kcaop
->ivlen
));
/* empty requests (no data, no auth data) skip the zero-copy path */
626 if (likely(caop
->len
|| caop
->auth_len
)) {
627 ret
= __crypto_auth_run_zc(ses_ptr
, kcaop
);
/* read back the possibly-updated IV for COP_FLAG_WRITE_IV callers */
632 cryptodev_cipher_get_iv(&ses_ptr
->cdata
, kcaop
->iv
,
633 min(ses_ptr
->cdata
.ivsize
, kcaop
->ivlen
));
/* releases the semaphore taken by crypto_get_session_by_sid */
636 mutex_unlock(&ses_ptr
->sem
);