// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * The following memory management concept is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendmsg. Filling up the TX
 * SGL does not cause a crypto operation -- the data will only be tracked by
 * the kernel. Upon receipt of one recvmsg call, the caller must provide a
 * buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request are released. The extracted TX SGL parts are released together with
 * the RX SGL release.
 */
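/*
 * A minimal user-space sketch of driving this interface through the AF_ALG
 * socket API from <linux/if_alg.h>. The "cbc(aes)" algorithm, the 16-byte
 * key/IV/data sizes and the key, iv, plaintext and ciphertext buffers are
 * illustrative only; error handling is omitted:
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "skcipher",
 *		.salg_name   = "cbc(aes)",
 *	};
 *	char cbuf[CMSG_SPACE(sizeof(__u32)) +
 *		  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
 *	struct iovec iov = { .iov_base = plaintext, .iov_len = 16 };
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf),
 *			      .msg_iov = &iov, .msg_iovlen = 1 };
 *	struct cmsghdr *cmsg;
 *	struct af_alg_iv *ivp;
 *	int tfmfd, opfd;
 *
 *	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	opfd = accept(tfmfd, NULL, 0);
 *
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_ALG;
 *	cmsg->cmsg_type = ALG_SET_OP;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
 *	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
 *
 *	cmsg = CMSG_NXTHDR(&msg, cmsg);
 *	cmsg->cmsg_level = SOL_ALG;
 *	cmsg->cmsg_type = ALG_SET_IV;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
 *	ivp = (void *)CMSG_DATA(cmsg);
 *	ivp->ivlen = 16;
 *	memcpy(ivp->iv, iv, 16);
 *
 *	sendmsg(opfd, &msg, 0);
 *	read(opfd, ciphertext, 16);
 */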
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct crypto_skcipher *tfm = pask->private;
	unsigned ivsize = crypto_skcipher_ivsize(tfm);

	return af_alg_sendmsg(sock, msg, size, ivsize);
}
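
/*
 * When a request only covered part of the stream (NOTFINAL), export the
 * cipher state into the socket context so that a subsequent recvmsg() can
 * continue the operation where this one left off.
 */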
static int algif_skcipher_export(struct sock *sk, struct skcipher_request *req)
{
	struct alg_sock *ask = alg_sk(sk);
	struct crypto_skcipher *tfm;
	struct af_alg_ctx *ctx;
	struct alg_sock *pask;
	unsigned statesize;
	struct sock *psk;
	int err;

	if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
		return 0;

	ctx = ask->private;
	psk = ask->parent;
	pask = alg_sk(psk);
	tfm = pask->private;

	statesize = crypto_skcipher_statesize(tfm);
	ctx->state = sock_kmalloc(sk, statesize, GFP_ATOMIC);
	if (!ctx->state)
		return -ENOMEM;

	err = crypto_skcipher_export(req, ctx->state);
	if (err) {
		sock_kzfree_s(sk, ctx->state, statesize);
		ctx->state = NULL;
	}

	return err;
}
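
/* Completion callback for asynchronous (AIO) cipher requests. */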
static void algif_skcipher_done(void *data, int err)
{
	struct af_alg_async_req *areq = data;
	struct sock *sk = areq->sk;

	if (err)
		goto out;

	err = algif_skcipher_export(sk, &areq->cra_u.skcipher_req);

out:
	af_alg_async_cb(data, err);
}
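
/*
 * Handle one recvmsg() invocation: map the caller's buffers into the RX SGL,
 * pull the matching TX data into a per-request SGL and run the cipher
 * operation, either asynchronously (AIO) or synchronously.
 */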
static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = pask->private;
	unsigned int bs = crypto_skcipher_chunksize(tfm);
	struct af_alg_async_req *areq;
	unsigned cflags = 0;
	int err = 0;
	size_t len = 0;

	if (!ctx->init || (ctx->more && ctx->used < bs)) {
		err = af_alg_wait_for_data(sk, flags, bs);
		if (err)
			return err;
	}

	/* Allocate cipher request for current operation. */
	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
				     crypto_skcipher_reqsize(tfm));
	if (IS_ERR(areq))
		return PTR_ERR(areq);

	/* convert iovecs of output buffers into RX SGL */
	err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len);
	if (err)
		goto free;

	/*
	 * If more buffers are expected to be processed, process only
	 * full block size buffers.
	 */
	if (ctx->more || len < ctx->used) {
		len -= len % bs;
		cflags |= CRYPTO_SKCIPHER_REQ_NOTFINAL;
	}

	/*
	 * Create a per request TX SGL for this request which tracks the
	 * SG entries from the global TX SGL.
	 */
	areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
	if (!areq->tsgl_entries)
		areq->tsgl_entries = 1;
	areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
						 areq->tsgl_entries),
				  GFP_KERNEL);
	if (!areq->tsgl) {
		err = -ENOMEM;
		goto free;
	}
	sg_init_table(areq->tsgl, areq->tsgl_entries);
	af_alg_pull_tsgl(sk, len, areq->tsgl, 0);

	/* Initialize the crypto operation */
	skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
	skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
				   areq->first_rsgl.sgl.sgt.sgl, len, ctx->iv);

	if (ctx->state) {
		err = crypto_skcipher_import(&areq->cra_u.skcipher_req,
					     ctx->state);
		sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
		ctx->state = NULL;
		if (err)
			goto free;
		cflags |= CRYPTO_SKCIPHER_REQ_CONT;
	}

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		sock_hold(sk);
		areq->iocb = msg->msg_iocb;

		/* Remember output size that will be generated. */
		areq->outlen = len;

		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
					      cflags |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      algif_skcipher_done, areq);
		err = ctx->enc ?
			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);

		/* AIO operation in progress */
		if (err == -EINPROGRESS)
			return -EIOCBQUEUED;

		sock_put(sk);
	} else {
		/* Synchronous operation */
		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
					      cflags |
					      CRYPTO_TFM_REQ_MAY_SLEEP |
					      CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &ctx->wait);
		err = crypto_wait_req(ctx->enc ?
			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
				      &ctx->wait);

		if (!err)
			err = algif_skcipher_export(
				sk, &areq->cra_u.skcipher_req);
	}

free:
	af_alg_free_resources(areq);

	return err ? err : len;
}
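
/*
 * recvmsg() entry point: iterate _skcipher_recvmsg() until the caller's
 * buffer space is consumed or an error occurs.
 */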
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _skcipher_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, it must make multiple
		 * separate AIO calls.
		 *
		 * Also return the error if no data has been processed so far.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || !ret)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	af_alg_wmem_wakeup(sk);
	release_sock(sk);

	return ret;
}
static struct proto_ops algif_skcipher_ops = {
	.family = PF_ALG,

	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.getname = sock_no_getname,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.mmap = sock_no_mmap,
	.bind = sock_no_bind,
	.accept = sock_no_accept,

	.release = af_alg_release,
	.sendmsg = skcipher_sendmsg,
	.recvmsg = skcipher_recvmsg,
	.poll = af_alg_poll,
};
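
/*
 * Sockets accepted through the nokey path may only perform I/O once a key
 * has been set on the parent transform; verify that here and drop the
 * nokey reference counts once the key is present.
 */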
static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct crypto_skcipher *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (!atomic_read(&ask->nokey_refcnt))
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		goto unlock;

	atomic_dec(&pask->nokey_refcnt);
	atomic_set(&ask->nokey_refcnt, 0);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}
static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}
static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}
static struct proto_ops algif_skcipher_ops_nokey = {
	.family = PF_ALG,

	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.getname = sock_no_getname,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.mmap = sock_no_mmap,
	.bind = sock_no_bind,
	.accept = sock_no_accept,

	.release = af_alg_release,
	.sendmsg = skcipher_sendmsg_nokey,
	.recvmsg = skcipher_recvmsg_nokey,
	.poll = af_alg_poll,
};
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_skcipher(name, type, mask);
}

static void skcipher_release(void *private)
{
	crypto_free_skcipher(private);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_skcipher_setkey(private, key, keylen);
}
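
/* Release all per-socket resources when the request socket is torn down. */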
static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct crypto_skcipher *tfm = pask->private;

	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	if (ctx->state)
		sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}
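
/* Allocate and initialize the per-socket context (IV buffer, TX SGL list). */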
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct af_alg_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct crypto_skcipher *tfm = private;
	unsigned int len = sizeof(*ctx);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	crypto_init_wait(&ctx->wait);

	ask->private = ctx;

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}
static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct crypto_skcipher *tfm = private;

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}
static const struct af_alg_type algif_type_skcipher = {
	.bind = skcipher_bind,
	.release = skcipher_release,
	.setkey = skcipher_setkey,
	.accept = skcipher_accept_parent,
	.accept_nokey = skcipher_accept_parent_nokey,
	.ops = &algif_skcipher_ops,
	.ops_nokey = &algif_skcipher_ops_nokey,
	.name = "skcipher",
	.owner = THIS_MODULE
};
static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);

	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);

MODULE_DESCRIPTION("Userspace interface for skcipher algorithms");
MODULE_LICENSE("GPL");