/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

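/*
 * Illustrative user-space sketch of how this interface is typically driven
 * through AF_ALG (not part of this module; the algorithm name, key size and
 * tag size below are example values):
 *
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)",
 *	};
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 * The cipher operation (ALG_SET_OP with ALG_OP_ENCRYPT/ALG_OP_DECRYPT), the
 * IV (ALG_SET_IV) and the associated data length (ALG_SET_AEAD_ASSOCLEN) are
 * passed as control messages with sendmsg() on opfd; the payload is
 * assoc data || plaintext for encryption (or assoc data || ciphertext ||
 * auth tag for decryption) and the result is collected with read()/recvmsg().
 */
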
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_sg_list {
        unsigned int cur;
        struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_async_rsgl {
        struct af_alg_sgl sgl;
        struct list_head list;
};

struct aead_async_req {
        struct scatterlist *tsgl;
        struct aead_async_rsgl first_rsgl;
        struct list_head list;
        struct kiocb *iocb;
        unsigned int tsgls;
        char iv[];
};

struct aead_ctx {
        struct aead_sg_list tsgl;
        struct aead_async_rsgl first_rsgl;
        struct list_head list;

        void *iv;

        struct af_alg_completion completion;

        unsigned long used;

        unsigned int len;
        bool more;
        bool merge;
        bool enc;

        size_t aead_assoclen;
        struct aead_request aead_req;
};

static inline int aead_sndbuf(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;

        return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
                          ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
        return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
        unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

        return ctx->used >= ctx->aead_assoclen + as;
}

static void aead_reset_ctx(struct aead_ctx *ctx)
{
        struct aead_sg_list *sgl = &ctx->tsgl;

        sg_init_table(sgl->sg, ALG_MAX_PAGES);
        sgl->cur = 0;
        ctx->used = 0;
        ctx->more = 0;
        ctx->merge = 0;
}

static void aead_put_sgl(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct scatterlist *sg = sgl->sg;
        unsigned int i;

        for (i = 0; i < sgl->cur; i++) {
                if (!sg_page(sg + i))
                        continue;

                put_page(sg_page(sg + i));
                sg_assign_page(sg + i, NULL);
        }
        aead_reset_ctx(ctx);
}

static void aead_wmem_wakeup(struct sock *sk)
{
        struct socket_wq *wq;

        if (!aead_writable(sk))
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        long timeout;
        int err = -ERESTARTSYS;

        if (flags & MSG_DONTWAIT)
                return -EAGAIN;

        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        add_wait_queue(sk_sleep(sk), &wait);
        for (;;) {
                if (signal_pending(current))
                        break;
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
                        err = 0;
                        break;
                }
        }
        remove_wait_queue(sk_sleep(sk), &wait);

        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

        return err;
}

static void aead_data_wakeup(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct socket_wq *wq;

        if (ctx->more)
                return;
        if (!ctx->used)
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned ivsize =
                crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct af_alg_control con = {};
        long copied = 0;
        bool enc = 0;
        bool init = 0;
        int err = -EINVAL;

        if (msg->msg_controllen) {
                err = af_alg_cmsg_send(msg, &con);
                if (err)
                        return err;

                init = 1;
                switch (con.op) {
                case ALG_OP_ENCRYPT:
                        enc = 1;
                        break;
                case ALG_OP_DECRYPT:
                        enc = 0;
                        break;
                default:
                        return -EINVAL;
                }

                if (con.iv && con.iv->ivlen != ivsize)
                        return -EINVAL;
        }

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (init) {
                ctx->enc = enc;
                if (con.iv)
                        memcpy(ctx->iv, con.iv->iv, ivsize);

                ctx->aead_assoclen = con.aead_assoclen;
        }

        while (size) {
                size_t len = size;
                struct scatterlist *sg = NULL;

                /* use the existing memory in an allocated page */
                if (ctx->merge) {
                        sg = sgl->sg + sgl->cur - 1;
                        len = min_t(unsigned long, len,
                                    PAGE_SIZE - sg->offset - sg->length);
                        err = memcpy_from_msg(page_address(sg_page(sg)) +
                                              sg->offset + sg->length,
                                              msg, len);
                        if (err)
                                goto unlock;

                        sg->length += len;
                        ctx->merge = (sg->offset + sg->length) &
                                     (PAGE_SIZE - 1);

                        ctx->used += len;
                        copied += len;
                        size -= len;
                        continue;
                }

                if (!aead_writable(sk)) {
                        /* user space sent too much data */
                        aead_put_sgl(sk);
                        err = -EMSGSIZE;
                        goto unlock;
                }

                /* allocate a new page */
                len = min_t(unsigned long, size, aead_sndbuf(sk));
                while (len) {
                        size_t plen = 0;

                        if (sgl->cur >= ALG_MAX_PAGES) {
                                aead_put_sgl(sk);
                                err = -E2BIG;
                                goto unlock;
                        }

                        sg = sgl->sg + sgl->cur;
                        plen = min_t(size_t, len, PAGE_SIZE);

                        sg_assign_page(sg, alloc_page(GFP_KERNEL));
                        err = -ENOMEM;
                        if (!sg_page(sg))
                                goto unlock;

                        err = memcpy_from_msg(page_address(sg_page(sg)),
                                              msg, plen);
                        if (err) {
                                __free_page(sg_page(sg));
                                sg_assign_page(sg, NULL);
                                goto unlock;
                        }

                        sg->offset = 0;
                        sg->length = plen;
                        len -= plen;
                        ctx->used += plen;
                        copied += plen;
                        size -= plen;
                        sgl->cur++;
                        ctx->merge = plen & (PAGE_SIZE - 1);
                }
        }

        err = 0;

        ctx->more = msg->msg_flags & MSG_MORE;
        if (!ctx->more && !aead_sufficient_data(ctx)) {
                aead_put_sgl(sk);
                err = -EMSGSIZE;
        }

unlock:
        aead_data_wakeup(sk);
        release_sock(sk);

        return err ?: copied;
}

static ssize_t aead_sendpage(struct socket *sock, struct page *page,
                             int offset, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct aead_sg_list *sgl = &ctx->tsgl;
        int err = -EINVAL;

        if (flags & MSG_SENDPAGE_NOTLAST)
                flags |= MSG_MORE;

        if (sgl->cur >= ALG_MAX_PAGES)
                return -E2BIG;

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (!size)
                goto done;

        if (!aead_writable(sk)) {
                /* user space sent too much data */
                aead_put_sgl(sk);
                err = -EMSGSIZE;
                goto unlock;
        }

        ctx->merge = 0;

        get_page(page);
        sg_set_page(sgl->sg + sgl->cur, page, size, offset);
        sgl->cur++;
        ctx->used += size;

        err = 0;

done:
        ctx->more = flags & MSG_MORE;
        if (!ctx->more && !aead_sufficient_data(ctx)) {
                aead_put_sgl(sk);
                err = -EMSGSIZE;
        }

unlock:
        aead_data_wakeup(sk);
        release_sock(sk);

        return err ?: size;
}

#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
        ((char *)req + sizeof(struct aead_request) + \
         crypto_aead_reqsize(tfm))

#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
        crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
        sizeof(struct aead_request)

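/*
 * Illustrative summary of the single allocation handed out for an async
 * request, as implied by the two macros above (not a separate structure
 * definition):
 *
 *	struct aead_request | tfm request context (crypto_aead_reqsize())
 *	| struct aead_async_req | IV (crypto_aead_ivsize())
 *
 * GET_ASYM_REQ() skips the aead_request and its context to reach the
 * struct aead_async_req, and GET_REQ_SIZE() is the total allocation size.
 */
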
static void aead_async_cb(struct crypto_async_request *_req, int err)
{
        struct sock *sk = _req->data;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
        struct aead_request *req = aead_request_cast(_req);
        struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
        struct scatterlist *sg = areq->tsgl;
        struct aead_async_rsgl *rsgl;
        struct kiocb *iocb = areq->iocb;
        unsigned int i, reqlen = GET_REQ_SIZE(tfm);

        list_for_each_entry(rsgl, &areq->list, list) {
                af_alg_free_sg(&rsgl->sgl);
                if (rsgl != &areq->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
        }

        for (i = 0; i < areq->tsgls; i++)
                put_page(sg_page(sg + i));

        sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
        sock_kfree_s(sk, req, reqlen);
        __sock_put(sk);
        iocb->ki_complete(iocb, err, err);
}

static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
                              int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
        struct aead_async_req *areq;
        struct aead_request *req = NULL;
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
        unsigned int as = crypto_aead_authsize(tfm);
        unsigned int i, reqlen = GET_REQ_SIZE(tfm);
        int err = -ENOMEM;
        unsigned long used;
        size_t outlen = 0;
        size_t usedpages = 0;

        lock_sock(sk);
        if (ctx->more) {
                err = aead_wait_for_data(sk, flags);
                if (err)
                        goto unlock;
        }

        used = ctx->used;
        outlen = used;

        if (!aead_sufficient_data(ctx))
                goto unlock;

        req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
        if (unlikely(!req))
                goto unlock;

        areq = GET_ASYM_REQ(req, tfm);
        memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
        INIT_LIST_HEAD(&areq->list);
        areq->iocb = msg->msg_iocb;
        memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
        aead_request_set_tfm(req, tfm);
        aead_request_set_ad(req, ctx->aead_assoclen);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  aead_async_cb, sk);
        used -= ctx->aead_assoclen + (ctx->enc ? as : 0);

        /* take over all tx sgls from ctx */
        areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
                                  GFP_KERNEL);
        if (unlikely(!areq->tsgl))
                goto free;

        sg_init_table(areq->tsgl, sgl->cur);
        for (i = 0; i < sgl->cur; i++)
                sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
                            sgl->sg[i].length, sgl->sg[i].offset);

        areq->tsgls = sgl->cur;

        /* create rx sgls */
        while (iov_iter_count(&msg->msg_iter)) {
                size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
                                      (outlen - usedpages));

                if (list_empty(&areq->list)) {
                        rsgl = &areq->first_rsgl;
                } else {
                        rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
                        if (unlikely(!rsgl)) {
                                err = -ENOMEM;
                                goto free;
                        }
                }
                rsgl->sgl.npages = 0;
                list_add_tail(&rsgl->list, &areq->list);

                /* make one iovec available as scatterlist */
                err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
                if (err < 0)
                        goto free;

                usedpages += err;

                /* chain the new scatterlist with previous one */
                if (last_rsgl)
                        af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

                last_rsgl = rsgl;

                /* we do not need more iovecs as we have sufficient memory */
                if (outlen <= usedpages)
                        break;

                iov_iter_advance(&msg->msg_iter, err);
        }

        err = -EINVAL;
        /* ensure output buffer is sufficiently large */
        if (usedpages < outlen)
                goto free;

        aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
                               areq->iv);
        err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
        if (err) {
                if (err == -EINPROGRESS) {
                        sock_hold(sk);
                        err = -EIOCBQUEUED;
                        aead_reset_ctx(ctx);
                        goto unlock;
                } else if (err == -EBADMSG) {
                        aead_put_sgl(sk);
                }
                goto free;
        }
        aead_put_sgl(sk);

free:
        list_for_each_entry(rsgl, &areq->list, list) {
                af_alg_free_sg(&rsgl->sgl);
                if (rsgl != &areq->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
        }
        if (areq->tsgl)
                sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
        if (req)
                sock_kfree_s(sk, req, reqlen);
unlock:
        aead_wmem_wakeup(sk);
        release_sock(sk);
        return err ? err : outlen;
}

static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct aead_async_rsgl *last_rsgl = NULL;
        struct aead_async_rsgl *rsgl, *tmp;
        int err = -EINVAL;
        unsigned long used = 0;
        size_t outlen = 0;
        size_t usedpages = 0;

        lock_sock(sk);

        /*
         * AEAD memory structure: For encryption, the tag is appended to the
         * ciphertext which implies that the memory allocated for the ciphertext
         * must be increased by the tag length. For decryption, the tag
         * is expected to be concatenated to the ciphertext. The plaintext
         * therefore has a memory size of the ciphertext minus the tag length.
         *
         * The memory structure for cipher operation has the following
         * structure:
         *	AEAD encryption input:  assoc data || plaintext
         *	AEAD encryption output: ciphertext || auth tag
         *	AEAD decryption input:  assoc data || ciphertext || auth tag
         *	AEAD decryption output: plaintext
         */

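        /*
         * Illustrative example (made-up sizes): for an AEAD with a 16-byte
         * auth tag and 20 bytes of associated data, encrypting 32 bytes of
         * plaintext means 52 bytes are queued via sendmsg()/sendpage()
         * (assoc data || plaintext), while decryption queues 68 bytes
         * (assoc data || ciphertext || auth tag) and returns 32 bytes of
         * plaintext.
         */
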
        if (ctx->more) {
                err = aead_wait_for_data(sk, flags);
                if (err)
                        goto unlock;
        }

        /* data length provided by caller via sendmsg/sendpage */
        used = ctx->used;

        /*
         * Make sure sufficient data is present -- note, the same check is
         * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
         * shall provide information to the data sender that something is
         * wrong, but they are irrelevant to maintain the kernel integrity.
         * We need this check here too in case user space decides to not honor
         * the error message in sendmsg/sendpage and still call recvmsg. This
         * check here protects the kernel integrity.
         */
        if (!aead_sufficient_data(ctx))
                goto unlock;

        outlen = used;

        /*
         * The cipher operation input data is reduced by the associated data
         * length as this data is processed separately later on.
         */
        used -= ctx->aead_assoclen + (ctx->enc ? as : 0);

        /* convert iovecs of output buffers into scatterlists */
        while (iov_iter_count(&msg->msg_iter)) {
                size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
                                      (outlen - usedpages));

                if (list_empty(&ctx->list)) {
                        rsgl = &ctx->first_rsgl;
                } else {
                        rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
                        if (unlikely(!rsgl)) {
                                err = -ENOMEM;
                                goto unlock;
                        }
                }
                rsgl->sgl.npages = 0;
                list_add_tail(&rsgl->list, &ctx->list);

                /* make one iovec available as scatterlist */
                err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
                if (err < 0)
                        goto unlock;

                usedpages += err;

                /* chain the new scatterlist with previous one */
                if (last_rsgl)
                        af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

                last_rsgl = rsgl;

                /* we do not need more iovecs as we have sufficient memory */
                if (outlen <= usedpages)
                        break;

                iov_iter_advance(&msg->msg_iter, err);
        }

        err = -EINVAL;
        /* ensure output buffer is sufficiently large */
        if (usedpages < outlen)
                goto unlock;

        sg_mark_end(sgl->sg + sgl->cur - 1);
        aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
                               used, ctx->iv);
        aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

        err = af_alg_wait_for_completion(ctx->enc ?
                                         crypto_aead_encrypt(&ctx->aead_req) :
                                         crypto_aead_decrypt(&ctx->aead_req),
                                         &ctx->completion);

        if (err) {
                /* EBADMSG implies a valid cipher operation took place */
                if (err == -EBADMSG)
                        aead_put_sgl(sk);

                goto unlock;
        }

        aead_put_sgl(sk);
        err = 0;

unlock:
        list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
                af_alg_free_sg(&rsgl->sgl);
                if (rsgl != &ctx->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
                list_del(&rsgl->list);
        }
        INIT_LIST_HEAD(&ctx->list);
        aead_wmem_wakeup(sk);
        release_sock(sk);

        return err ? err : outlen;
}

static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
                        int flags)
{
        return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
                aead_recvmsg_async(sock, msg, flags) :
                aead_recvmsg_sync(sock, msg, flags);
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
                              poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned int mask;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        if (!ctx->more)
                mask |= POLLIN | POLLRDNORM;

        if (aead_writable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}

static struct proto_ops algif_aead_ops = {
        .family         =       PF_ALG,

        .connect        =       sock_no_connect,
        .socketpair     =       sock_no_socketpair,
        .getname        =       sock_no_getname,
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
        .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .bind           =       sock_no_bind,
        .accept         =       sock_no_accept,
        .setsockopt     =       sock_no_setsockopt,

        .release        =       af_alg_release,
        .sendmsg        =       aead_sendmsg,
        .sendpage       =       aead_sendpage,
        .recvmsg        =       aead_recvmsg,
        .poll           =       aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
        return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
        crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
        return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
        return crypto_aead_setkey(private, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned int ivlen = crypto_aead_ivsize(
                                crypto_aead_reqtfm(&ctx->aead_req));

        WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
        aead_put_sgl(sk);
        sock_kzfree_s(sk, ctx->iv, ivlen);
        sock_kfree_s(sk, ctx, ctx->len);
        af_alg_release_parent(sk);
}

static int aead_accept_parent(void *private, struct sock *sk)
{
        struct aead_ctx *ctx;
        struct alg_sock *ask = alg_sk(sk);
        unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
        unsigned int ivlen = crypto_aead_ivsize(private);

        ctx = sock_kmalloc(sk, len, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        memset(ctx, 0, len);

        ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
        if (!ctx->iv) {
                sock_kfree_s(sk, ctx, len);
                return -ENOMEM;
        }
        memset(ctx->iv, 0, ivlen);

        ctx->len = len;
        ctx->used = 0;
        ctx->more = 0;
        ctx->merge = 0;
        ctx->enc = 0;
        ctx->tsgl.cur = 0;
        ctx->aead_assoclen = 0;
        af_alg_init_completion(&ctx->completion);
        sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
        INIT_LIST_HEAD(&ctx->list);

        ask->private = ctx;

        aead_request_set_tfm(&ctx->aead_req, private);
        aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  af_alg_complete, &ctx->completion);

        sk->sk_destruct = aead_sock_destruct;

        return 0;
}

static const struct af_alg_type algif_type_aead = {
        .bind           =       aead_bind,
        .release        =       aead_release,
        .setkey         =       aead_setkey,
        .setauthsize    =       aead_setauthsize,
        .accept         =       aead_accept_parent,
        .ops            =       &algif_aead_ops,
        .name           =       "aead",
        .owner          =       THIS_MODULE
};

static int __init algif_aead_init(void)
{
        return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
        int err = af_alg_unregister_type(&algif_type_aead);
        BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");