/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
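
/*
 * Minimal user-space usage sketch (illustrative only: error handling is
 * omitted, and the algorithm name, key size and tag size are assumptions
 * for the example, not requirements of this interface):
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)"
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 * The cipher operation itself is then driven through sendmsg(2) on opfd,
 * using ALG_SET_OP, ALG_SET_IV and ALG_SET_AEAD_ASSOCLEN control messages,
 * followed by read(2)/recvmsg(2) to collect the result.
 */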
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_sg_list {
	unsigned int cur;
	struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct aead_async_req {
	struct scatterlist *tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;
	struct kiocb *iocb;
	unsigned int tsgls;
	char iv[];
};

struct aead_ctx {
	struct aead_sg_list tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;

	void *iv;

	struct af_alg_completion completion;

	unsigned long used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	size_t aead_assoclen;
	struct aead_request aead_req;
};

static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	return ctx->used >= ctx->aead_assoclen + as;
}
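
/*
 * Example for the check above (sizes assumed for illustration): with a
 * 16-byte authentication tag and 20 bytes of associated data, at least
 * 36 bytes must have been received from the sender before a cipher
 * operation may start.
 */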

static void aead_reset_ctx(struct aead_ctx *ctx)
{
	struct aead_sg_list *sgl = &ctx->tsgl;

	sg_init_table(sgl->sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
}

static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	aead_reset_ctx(ctx);
}

static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		size_t len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			size_t plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}
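
/*
 * aead_sendpage() below is the zero-copy counterpart of aead_sendmsg():
 * rather than copying user data into freshly allocated pages, it takes a
 * reference on the page supplied by the caller (e.g. via splice(2)) and
 * links that page into the tx scatterlist directly.
 */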
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	ctx->merge = 0;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
	((char *)req + sizeof(struct aead_request) + \
	 crypto_aead_reqsize(tfm))

#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
	sizeof(struct aead_request)
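
/*
 * Layout of the blob allocated for an asynchronous operation, as implied by
 * the two macros above:
 *
 *	struct aead_request || tfm-specific request context ||
 *	struct aead_async_req (incl. trailing IV buffer)
 *
 * GET_REQ_SIZE() computes the total allocation size; GET_ASYM_REQ() recovers
 * the aead_async_req bookkeeping that trails the aead_request handed back by
 * the crypto layer.
 */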

static void aead_async_cb(struct crypto_async_request *_req, int err)
{
	struct sock *sk = _req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_request *req = aead_request_cast(_req);
	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
	struct scatterlist *sg = areq->tsgl;
	struct aead_async_rsgl *rsgl;
	struct kiocb *iocb = areq->iocb;
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);

	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	for (i = 0; i < areq->tsgls; i++)
		put_page(sg_page(sg + i));

	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	sock_kfree_s(sk, req, reqlen);
	__sock_put(sk);
	iocb->ki_complete(iocb, err, err);
}

static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
			      int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_async_req *areq;
	struct aead_request *req = NULL;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
	unsigned int as = crypto_aead_authsize(tfm);
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
	int err = -ENOMEM;
	unsigned long used;
	size_t outlen;
	size_t usedpages = 0;

	lock_sock(sk);
	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	used = ctx->used;
	outlen = used;

	if (!aead_sufficient_data(ctx))
		goto unlock;

	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	areq = GET_ASYM_REQ(req, tfm);
	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
	INIT_LIST_HEAD(&areq->list);
	areq->iocb = msg->msg_iocb;
	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ctx->aead_assoclen);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  aead_async_cb, sk);
	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);

	/* take over all tx sgls from ctx */
	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
				  GFP_KERNEL);
	if (unlikely(!areq->tsgl))
		goto free;

	sg_init_table(areq->tsgl, sgl->cur);
	for (i = 0; i < sgl->cur; i++)
		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
			    sgl->sg[i].length, sgl->sg[i].offset);

	areq->tsgls = sgl->cur;

	/* create rx sgls */
	while (iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&areq->list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto free;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto free;

		usedpages += err;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		/* we do not need more iovecs as we have sufficient memory */
		if (outlen <= usedpages)
			break;

		iov_iter_advance(&msg->msg_iter, err);
	}

	err = -EINVAL;
	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen)
		goto free;

	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
			       areq->iv);
	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
	if (err) {
		if (err == -EINPROGRESS) {
			sock_hold(sk);
			err = -EIOCBQUEUED;
			aead_reset_ctx(ctx);
			goto unlock;
		} else if (err == -EBADMSG) {
			aead_put_sgl(sk);
		}
		goto free;
	}
	aead_put_sgl(sk);

free:
	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	if (areq->tsgl)
		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	if (req)
		sock_kfree_s(sk, req, reqlen);
unlock:
	aead_wmem_wakeup(sk);
	release_sock(sk);
	return err ? err : outlen;
}

static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL;
	struct aead_async_rsgl *rsgl, *tmp;
	int err = -EINVAL;
	unsigned long used = 0;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);

	/*
	 * AEAD memory structure: For encryption, the tag is appended to the
	 * ciphertext, which implies that the memory allocated for the
	 * ciphertext must be increased by the tag length. For decryption, the
	 * tag is expected to be concatenated to the ciphertext. The plaintext
	 * therefore has a memory size of the ciphertext minus the tag length.
	 *
	 * The memory layout for a cipher operation is as follows:
	 *
	 * AEAD encryption input:  assoc data || plaintext
	 * AEAD encryption output: ciphertext || auth tag
	 * AEAD decryption input:  assoc data || ciphertext || auth tag
	 * AEAD decryption output: plaintext
	 */
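
	/*
	 * Worked example for the layout above (sizes assumed for
	 * illustration): encrypting 64 bytes of plaintext with 16 bytes of
	 * associated data and a 16-byte tag consumes 80 bytes of input
	 * (assoc data || plaintext) and produces 80 bytes of output (64
	 * bytes of ciphertext followed by the 16-byte tag).
	 */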
	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	/* data length provided by caller via sendmsg/sendpage */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendmsg/sendpage
	 * inform the data sender that something is wrong, but they are
	 * irrelevant for maintaining kernel integrity. We need the check here
	 * too, in case user space decides not to honor the error returned by
	 * sendmsg/sendpage and still calls recvmsg; this check protects the
	 * kernel.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	outlen = used;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);

	/* convert iovecs of output buffers into scatterlists */
	while (iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&ctx->list)) {
			rsgl = &ctx->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto unlock;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &ctx->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto unlock;

		usedpages += err;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		/* we do not need more iovecs as we have sufficient memory */
		if (outlen <= usedpages)
			break;

		iov_iter_advance(&msg->msg_iter, err);
	}

	err = -EINVAL;
	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen)
		goto unlock;

	sg_mark_end(sgl->sg + sgl->cur - 1);
	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);

		goto unlock;
	}

	aead_put_sgl(sk);
	err = 0;

unlock:
	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &ctx->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
		list_del(&rsgl->list);
	}
	INIT_LIST_HEAD(&ctx->list);
	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}
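
/*
 * recvmsg entry point: requests carrying a real (asynchronous) AIO iocb are
 * steered to the async path; everything else is handled synchronously.
 */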
static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
			int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		aead_recvmsg_async(sock, msg, flags) :
		aead_recvmsg_sync(sock, msg, flags);
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}
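
/*
 * Note: the callbacks above are reached through the generic af_alg
 * setsockopt() handlers on the parent (bound) tfm socket -- ALG_SET_KEY
 * maps to aead_setkey() and ALG_SET_AEAD_AUTHSIZE to aead_setauthsize().
 */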

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}
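
/*
 * aead_accept_parent() runs when user space calls accept(2) on the bound tfm
 * socket: it allocates the per-operation socket context (IV buffer, tx
 * scatterlist, completion) that the sendmsg/recvmsg handlers above operate
 * on.
 */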
static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
	INIT_LIST_HEAD(&ctx->list);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.ops		=	&algif_aead_ops,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");