/**
 * Connection oriented routing
 * Copyright (C) 2007-2019 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include "cor.h"

#define MAX_SND_MSGLEN 4096
#define MAX_MSG_LEN 256
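
/*
 * Commands from the userspace routing daemon arrive with an 8 byte header
 * (4 byte command id followed by a 4 byte parameter length) and at most
 * MAX_SND_MSGLEN parameter bytes; see _cor_rd_sendmsg_hdr() and
 * _cor_rd_sendmsg_body(). Messages to the daemon use the same framing and
 * are built in the fill_msgbuf_*() helpers.
 */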

struct rd_msg{
	struct list_head lh;

	struct list_head cs_lh;
	struct cor_sock *cs;

	__u32 type;
	union{
	}msg;
};
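
/*
 * State of the rdaemon socket. The snd_* fields reassemble a partially
 * written command from the daemon; rcvbuf holds the message currently
 * being read by the daemon.
 */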

struct cor_rdsock{
	struct sock sk;

	atomic_t connected;

	__u8 versioninited;

	struct list_head socks;

	struct mutex sndbuf_lock;
	char snd_cmdplen_buf[8];
	__u8 snd_cmdplen_read;
	__u32 param_read;
	char *cmdparams;

	atomic_t ready_to_read;
	struct list_head rcv_msgs; /* protected by rds_lock */

	struct mutex rcvbuf_lock;
	char rcvbuf[MAX_MSG_LEN + 8];
	__u32 rcvbuflen;
	__u32 rcvbufoffset;
};

static struct kmem_cache *rdmsg_slab;

static DEFINE_MUTEX(rds_lock);
static struct cor_rdsock *crd = 0;
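
/*
 * At most one rdaemon socket exists at a time (crd above, protected by
 * rds_lock). Releasing it tears down neighbor state, discards all queued
 * messages and fails every conn_managed socket still waiting for an
 * answer from the daemon.
 */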

int cor_rd_socket_release(struct socket *sock)
{
	mutex_lock(&rds_lock);

	BUG_ON(((struct cor_rdsock *) sock->sk) != crd);

	cor_neighbor_down();

	while (list_empty(&(crd->rcv_msgs)) == 0) {
		struct rd_msg *rdm = container_of(crd->rcv_msgs.next,
				struct rd_msg, lh);

		list_del(&(rdm->lh));
		if (rdm->cs != 0) {
			list_del(&(rdm->cs_lh));
			kref_put(&(rdm->cs->ref), free_sock);
			rdm->cs = 0;
		}
		kmem_cache_free(rdmsg_slab, rdm);
	}

	while (list_empty(&(crd->socks)) == 0) {
		struct cor_sock *cs = container_of(crd->socks.next,
				struct cor_sock, data.conn_managed.crd_lh);

		BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
		BUG_ON(cs->data.conn_managed.in_crd_list == 0);
		list_del(&(cs->data.conn_managed.crd_lh));
		cs->data.conn_managed.in_crd_list = 0;
		_set_sock_connecterror(cs, -ENETUNREACH);
		kref_put(&(cs->ref), free_sock);
	}

	if (crd->cmdparams != 0) {
		kfree(crd->cmdparams);
		crd->cmdparams = 0;
	}

	crd = 0;

	mutex_unlock(&rds_lock);

	sock_put(sock->sk);

	return 0;
}

int cor_rd_socket_bind(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len)
{
	return -EOPNOTSUPP;
}

int cor_rd_socket_connect(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len, int flags)
{
	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	atomic_set(&(crd->connected), 1);

	lock_sock(sock->sk);
	sock->state = SS_CONNECTED;
	release_sock(sock->sk);
	return 0;
}

int cor_rd_socket_accept(struct socket *sock, struct socket *newsock, int flags,
		bool kern)
{
	return -EOPNOTSUPP;
}

int cor_rd_socket_listen(struct socket *sock, int len)
{
	return -EOPNOTSUPP;
}

int cor_rd_socket_shutdown(struct socket *sock, int flags)
{
	return 0;
}

int cor_rd_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int cor_rd_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	return -ENOPROTOOPT;
}

int cor_rd_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen)
{
	return -ENOPROTOOPT;
}
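
/*
 * Parsers for the CRD_UTK_* commands received from the daemon. Each returns
 * 0 on success and 1 on a malformed command, which makes sendmsg() fail
 * with -ECONNRESET (see _cor_rd_sendmsg_body()).
 */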

static int cor_rd_parse_version(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	int rc = 0;
	__u32 version;

	mutex_lock(&(rds_lock));

	BUG_ON(crd == 0);

	if (paramlen != 4)
		goto err;

	version = parse_u32(param);
	if (version != 0)
		goto err;

	if (crd->versioninited != 0)
		goto err;

	crd->versioninited = 1;

	if (0) {
err:
		rc = 1;
	}
	mutex_unlock(&(rds_lock));

	return rc;
}
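
/*
 * CRD_UTK_UP parameters: 8 byte flags (not evaluated here), a 4 byte
 * address length and, if nonzero, the address itself (at most 64 bytes).
 */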

static int cor_rd_parse_up(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	__u64 flags;
	__u32 addrlen;
	char *addr = 0;

	if (unlikely(paramlen < 12))
		return 1;

	flags = parse_u64(param);
	addrlen = parse_u32(param + 8);

	if (addrlen > 0) {
		if (unlikely(unlikely(addrlen > 64) ||
				unlikely(paramlen < (12 + addrlen))))
			return 1;
		addr = param + 12;
	}

	if (cor_neighbor_up(addr, addrlen) != 0)
		return 1;

	return 0;
}
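
/*
 * CRD_UTK_CONNECTERROR parameters: the 8 byte connect cookie plus a 4 byte
 * error code, which is mapped to an errno for the waiting socket; unknown
 * codes fall back to -ENETUNREACH.
 */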

static int cor_rd_parse_connecterror(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	__be64 cookie;
	__u32 error;
	int errorno;

	if (unlikely(paramlen < 12))
		return 1;

	cookie = parse_be64(param);
	error = parse_u32(param + 8);

	if (error == CRD_UTK_CONNECTERROR_ACCES) {
		errorno = -EACCES;
	} else if (error == CRD_UTK_CONNECTERROR_NETUNREACH) {
		errorno = -ENETUNREACH;
	} else if (error == CRD_UTK_CONNECTERROR_TIMEDOUT) {
		errorno = -ETIMEDOUT;
	} else if (error == CRD_UTK_CONNECTERROR_REFUSED) {
		errorno = -ECONNREFUSED;
	} else {
		errorno = -ENETUNREACH;
	}

	set_sock_connecterror(cookie, errorno);

	return 0;
}

static int cor_rd_parse(struct cor_rdsock *crd, __u32 cmd, char *param,
		__u32 paramlen)
{
	if (unlikely(unlikely(cmd != CRD_UTK_VERSION) &&
			unlikely(crd->versioninited == 0)))
		return 1;

	if (cmd == CRD_UTK_VERSION) {
		return cor_rd_parse_version(crd, cmd, param, paramlen);
	} else if (cmd == CRD_UTK_UP) {
		return cor_rd_parse_up(crd, cmd, param, paramlen);
	} else if (cmd == CRD_UTK_CONNECTERROR) {
		return cor_rd_parse_connecterror(crd, cmd, param, paramlen);
	} else {
		return 1;
	}
}
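
/*
 * The sendmsg() path is a small state machine: _cor_rd_sendmsg_hdr()
 * gathers the 8 byte cmd/length header, _cor_rd_sendmsg_body() gathers the
 * parameters and, once complete, passes the command to cor_rd_parse().
 * Both return the number of bytes consumed or a negative errno.
 */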

static int _cor_rd_sendmsg_hdr(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	__u32 cpy;
	size_t st_rc;

	BUG_ON(len == 0);
	BUG_ON(len > (1024 * 1024 * 1024));

	BUG_ON(crd->snd_cmdplen_read > 8);
	cpy = (8 - crd->snd_cmdplen_read);
	if (unlikely(cpy > len))
		cpy = len;

	st_rc = copy_from_iter(crd->snd_cmdplen_buf +
			crd->snd_cmdplen_read, cpy, &(msg->msg_iter));

	if (unlikely(st_rc != cpy))
		return -EFAULT;

	crd->snd_cmdplen_read += cpy;

	return cpy;
}

static int _cor_rd_sendmsg_body(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	__u32 cpy = 0;

	__u32 cmd;
	__u32 paramlen;

	BUG_ON(len == 0);
	BUG_ON(len > (1024 * 1024 * 1024));

	BUG_ON(crd->snd_cmdplen_read != 8);

	cmd = parse_u32(crd->snd_cmdplen_buf);
	paramlen = parse_u32(crd->snd_cmdplen_buf + 4);

	if (crd->cmdparams == 0 && paramlen != 0) {
		BUG_ON(crd->param_read != 0);
		if (unlikely(paramlen > MAX_SND_MSGLEN))
			return -ECONNRESET;

		crd->cmdparams = kmalloc(paramlen, GFP_KERNEL);
		if (unlikely(crd->cmdparams == 0))
			return -ENOMEM;
	}

	if (crd->param_read < paramlen) {
		size_t st_rc;

		cpy = (paramlen - crd->param_read);
		if (cpy > len)
			cpy = len;

		BUG_ON(crd->cmdparams == 0);

		st_rc = copy_from_iter(crd->cmdparams +
				crd->param_read, cpy, &(msg->msg_iter));

		if (unlikely(st_rc != cpy))
			return -EFAULT;

		crd->param_read += cpy;
	}

	BUG_ON(crd->param_read > paramlen);

	if (crd->param_read == paramlen) {
		int rc = cor_rd_parse(crd, cmd, crd->cmdparams, paramlen);
		if (unlikely(rc != 0))
			return -ECONNRESET;

		memset(crd->snd_cmdplen_buf, 0,
				sizeof(crd->snd_cmdplen_buf));
		crd->snd_cmdplen_read = 0;
		crd->param_read = 0;
		kfree(crd->cmdparams);
		crd->cmdparams = 0;
	}

	return cpy;
}

static int _cor_rd_sendmsg(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	if (crd->snd_cmdplen_read < 8) {
		return _cor_rd_sendmsg_hdr(crd, msg, len);
	} else {
		return _cor_rd_sendmsg_body(crd, msg, len);
	}
}

int cor_rd_sendmsg(struct socket *sock, struct msghdr *msg, size_t total_len)
{
	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	int rc = 0;
	__u32 totalread = 0;
	__u32 currread = 0;

	__u32 len;

	if (unlikely(total_len > 1024 * 1024 * 1024))
		len = 1024 * 1024 * 1024;
	else
		len = (__u32) total_len;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return -ENOTCONN;

	mutex_lock(&(crd->sndbuf_lock));

	while (currread < len) {
		rc = _cor_rd_sendmsg(crd, msg, len - currread);
		if (unlikely(rc < 0))
			goto out;
		currread += rc;
		totalread += rc;
	}

out:
	mutex_unlock(&(crd->sndbuf_lock));

	if (rc >= 0 && totalread != 0) {
		BUG_ON(totalread > (1024 * 1024 * 1024));
		rc = totalread;
	}

	return rc;
}
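
/*
 * recvmsg() side: queued struct rd_msg entries are serialized into
 * crd->rcvbuf one at a time by fill_msgbuf() and the helpers below.
 */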

static void fill_msgbuf_supportedversions(struct cor_rdsock *crd,
		struct rd_msg *rdm)
{
	BUG_ON(rdm->cs != 0);

	BUG_ON(MAX_MSG_LEN < 16);

	put_u32(crd->rcvbuf, CRD_KTU_SUPPORTEDVERSIONS);
	put_u32(crd->rcvbuf + 4, 8); /* len */
	put_u32(crd->rcvbuf + 8, 0);
	put_u32(crd->rcvbuf + 12, 0);

	crd->rcvbuflen = 16;
}

static void fill_msgbuf_connect(struct cor_rdsock *crd, struct rd_msg *rdm)
{
	char *remoteaddr;
	__u32 remoteaddr_len;

	BUG_ON(rdm->cs == 0);
	mutex_lock(&(rdm->cs->lock));
	BUG_ON(rdm->cs->type != CS_TYPE_CONN_MANAGED);

	remoteaddr = (char *) &(rdm->cs->data.conn_managed.remoteaddr);
	remoteaddr_len = sizeof(struct cor_sockaddr);
	BUG_ON(remoteaddr_len != 68);

	BUG_ON(MAX_MSG_LEN < (16 + remoteaddr_len));

	put_u32(crd->rcvbuf, CRD_KTU_CONNECT);
	put_u32(crd->rcvbuf + 4, 8 + remoteaddr_len);
	put_be64(crd->rcvbuf + 8, rdm->cs->data.conn_managed.cookie);
	memcpy(crd->rcvbuf + 16, remoteaddr, remoteaddr_len);

	crd->rcvbuflen = 16 + remoteaddr_len;
	mutex_unlock(&(rdm->cs->lock));
}

static void _fill_msgbuf(struct cor_rdsock *crd, struct rd_msg *rdm)
{
	if (rdm->type == CRD_KTU_SUPPORTEDVERSIONS) {
		fill_msgbuf_supportedversions(crd, rdm);
	} else if (rdm->type == CRD_KTU_CONNECT) {
		fill_msgbuf_connect(crd, rdm);
	} else {
		BUG();
	}
}
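
/*
 * Dequeue the next rd_msg and render it into crd->rcvbuf. With blocking == 0
 * this returns -EAGAIN instead of sleeping; otherwise it waits until a
 * message is queued and ready_to_read is set.
 */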

static int fill_msgbuf(struct socket *sock, struct cor_rdsock *crd,
		int blocking)
{
	int rc = 0;
	struct rd_msg *rdm = 0;

	while (1) {
		mutex_lock(&(rds_lock));
		if (list_empty(&(crd->rcv_msgs)) == 0)
			break;
		atomic_set(&(crd->ready_to_read), 0);
		mutex_unlock(&(rds_lock));

		if (blocking == 0)
			return -EAGAIN;

		if (wait_event_interruptible(*sk_sleep(sock->sk),
				atomic_read(&(crd->ready_to_read)) != 0) != 0)
			return -ERESTARTSYS;
	}

	rdm = container_of(crd->rcv_msgs.next, struct rd_msg, lh);
	list_del(&(rdm->lh));

	if (rdm->cs != 0)
		list_del(&(rdm->cs_lh));

	mutex_unlock(&(rds_lock));

	memset(crd->rcvbuf, 0, sizeof(crd->rcvbuf));
	crd->rcvbuflen = 0;
	crd->rcvbufoffset = 0;

	_fill_msgbuf(crd, rdm);

	if (rdm->cs != 0) {
		kref_put(&(rdm->cs->ref), free_sock);
		rdm->cs = 0;
	}

	kmem_cache_free(rdmsg_slab, rdm);

	return rc;
}

int cor_rd_recvmsg(struct socket *sock, struct msghdr *msg, size_t total_len,
		int flags)
{
	int copied = 0;
	int blocking = (flags & MSG_DONTWAIT) == 0;

	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	__u32 totallen;

	if (unlikely(total_len > 1024 * 1024 * 1024))
		totallen = 1024 * 1024 * 1024;
	else
		totallen = (__u32) total_len;

	if (unlikely((flags & MSG_PEEK) != 0))
		return -EINVAL;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return -ENOTCONN;

	mutex_lock(&(crd->rcvbuf_lock));
	while (copied < totallen) {
		__u32 len = totallen - copied;
		size_t st_rc;

		if (crd->rcvbufoffset == crd->rcvbuflen) {
			int rc = fill_msgbuf(sock, crd,
					blocking && copied == 0);
			if (rc != 0 && copied == 0)
				copied = rc;
			if (rc != 0)
				break;
		}

		BUG_ON(crd->rcvbufoffset > crd->rcvbuflen);

		if (len > (crd->rcvbuflen - crd->rcvbufoffset))
			len = crd->rcvbuflen - crd->rcvbufoffset;

		st_rc = copy_to_iter(crd->rcvbuf + crd->rcvbufoffset, len,
				&(msg->msg_iter));

		if (unlikely(st_rc != len)) {
			copied = -EFAULT;
			break;
		}

		copied += len;
		crd->rcvbufoffset += len;
	}
	mutex_unlock(&(crd->rcvbuf_lock));

	BUG_ON(copied > 0 && unlikely((copied > total_len ||
			copied > totallen)));

	return copied;
}

static unsigned int cor_rd_poll(struct file *file, struct socket *sock,
		poll_table *wait)
{
	unsigned int mask = 0;

	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return 0;

	sock_poll_wait(file, sock, wait);

	mutex_lock(&(crd->rcvbuf_lock));
	mutex_lock(&(rds_lock));

	if (crd->rcvbufoffset != crd->rcvbuflen ||
			(list_empty(&(crd->rcv_msgs)) == 0))
		mask |= (POLLIN | POLLRDNORM);

	mutex_unlock(&(rds_lock));
	mutex_unlock(&(crd->rcvbuf_lock));

	mask |= (POLLOUT | POLLWRNORM);

	return mask;
}

struct proto cor_rd_proto = {
	.name = "cor_rd",
	.obj_size = sizeof(struct cor_rdsock),
	.owner = THIS_MODULE,
};

const struct proto_ops cor_rd_proto_ops = {
	.family = PF_COR,
	.owner = THIS_MODULE,
	.release = cor_rd_socket_release,
	.bind = cor_rd_socket_bind,
	.connect = cor_rd_socket_connect,
	.accept = cor_rd_socket_accept,
	.listen = cor_rd_socket_listen,
	.shutdown = cor_rd_socket_shutdown,
	.ioctl = cor_rd_ioctl,
	.setsockopt = cor_rd_setsockopt,
	.getsockopt = cor_rd_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cor_rd_ioctl,
	.compat_setsockopt = cor_rd_setsockopt,
	.compat_getsockopt = cor_rd_getsockopt,
#endif
	.sendmsg = cor_rd_sendmsg,
	.recvmsg = cor_rd_recvmsg,
	.poll = cor_rd_poll,

	.socketpair = cor_socket_socketpair,
	.getname = cor_socket_getname,
	.mmap = cor_socket_mmap,

	/* sendpage and splice_read are optional */
};
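
/*
 * Called when the rdaemon socket is created. Only one may exist at a time;
 * a CRD_KTU_SUPPORTEDVERSIONS message is queued immediately so that the
 * daemon can start the version handshake right after connecting.
 */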

int cor_create_rdaemon_sock(struct net *net, struct socket *sock, int protocol,
		int kern)
{
	struct rd_msg *rdm = 0;
	struct cor_rdsock *newcrd = 0;

	rdm = kmem_cache_alloc(rdmsg_slab, GFP_KERNEL);
	if (unlikely(rdm == 0))
		return -ENOMEM;

	newcrd = (struct cor_rdsock *) sk_alloc(net, PF_COR, GFP_KERNEL,
			&cor_rd_proto, kern);
	if (unlikely(newcrd == 0)) {
		kmem_cache_free(rdmsg_slab, rdm);
		return -ENOMEM;
	}

	sock_init_data(sock, (struct sock *) newcrd);
	newcrd->sk.sk_protocol = protocol;
	memset(((char *)newcrd) + sizeof(struct sock), 0,
			sizeof(struct cor_rdsock) - sizeof(struct sock));

	atomic_set(&(newcrd->connected), 0);
	INIT_LIST_HEAD(&(newcrd->socks));
	mutex_init(&(newcrd->sndbuf_lock));
	mutex_init(&(newcrd->rcvbuf_lock));
	atomic_set(&(newcrd->ready_to_read), 0);
	INIT_LIST_HEAD(&(newcrd->rcv_msgs));

	mutex_lock(&rds_lock);
	if (crd != 0) {
		sock_put((struct sock *) newcrd);
		mutex_unlock(&rds_lock);
		kmem_cache_free(rdmsg_slab, rdm);
		return -EACCES;
	}
	crd = newcrd;

	memset(rdm, 0, sizeof(struct rd_msg));
	rdm->type = CRD_KTU_SUPPORTEDVERSIONS;
	list_add_tail(&(rdm->lh), &(crd->rcv_msgs));

	atomic_set(&(newcrd->ready_to_read), 1);

	mutex_unlock(&rds_lock);

	sock->state = SS_UNCONNECTED;
	sock->ops = &cor_rd_proto_ops;
	sock->sk = (struct sock *) crd;

	return 0;
}
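
/*
 * Queue a CRD_KTU_CONNECT request for a conn_managed socket and wake up the
 * daemon. Returns -EINPROGRESS if the request was queued; a failure is
 * reported back later via CRD_UTK_CONNECTERROR.
 */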

int rdreq_connect(struct cor_sock *cs)
{
	int rc;

	struct rd_msg *rdm = kmem_cache_alloc(rdmsg_slab, GFP_KERNEL);

	mutex_lock(&(rds_lock));
	mutex_lock(&(cs->lock));

	BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
	BUG_ON(cs->data.conn_managed.cookie == 0);

	if (unlikely(crd == 0 || atomic_read(&(crd->connected)) == 0 ||
			crd->versioninited == 0)) {
		rc = -ENETUNREACH;
		goto out;
	}

	if (unlikely(rdm == 0)) {
		rc = -ETIMEDOUT;
		goto out;
	}

	memset(rdm, 0, sizeof(struct rd_msg));

	kref_get(&(cs->ref));
	list_add_tail(&(rdm->cs_lh), &(cs->data.conn_managed.rd_msgs));
	rdm->cs = cs;
	rdm->type = CRD_KTU_CONNECT;

	if (list_empty(&(crd->rcv_msgs))) {
		atomic_set(&(crd->ready_to_read), 1);
		barrier();
		crd->sk.sk_data_ready(&(crd->sk));
	}
	list_add_tail(&(rdm->lh), &(crd->rcv_msgs));
	kref_get(&(cs->ref));

	kref_get(&(cs->ref));
	list_add_tail(&(cs->data.conn_managed.crd_lh), &(crd->socks));
	cs->data.conn_managed.in_crd_list = 1;

	rc = -EINPROGRESS;

out:
	mutex_unlock(&(cs->lock));
	mutex_unlock(&(rds_lock));

	if (unlikely(rc != -EINPROGRESS && rdm != 0))
		kmem_cache_free(rdmsg_slab, rdm); /* never queued */

	return rc;
}
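
/*
 * Detach a conn_managed socket from the rdaemon: drop its queued rd_msgs
 * and remove it from crd->socks, releasing the corresponding references.
 */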

void cor_usersock_release(struct cor_sock *cs)
{
	mutex_lock(&(rds_lock));
	mutex_lock(&(cs->lock));

	if (cs->type != CS_TYPE_CONN_MANAGED)
		goto out;

	while (list_empty(&(cs->data.conn_managed.rd_msgs)) == 0) {
		struct rd_msg *rdm = container_of(
				cs->data.conn_managed.rd_msgs.next,
				struct rd_msg, cs_lh);

		list_del(&(rdm->lh));
		BUG_ON(rdm->cs != cs);
		list_del(&(rdm->cs_lh));
		kref_put(&(cs->ref), kreffree_bug);
		rdm->cs = 0;
		kmem_cache_free(rdmsg_slab, rdm);
	}

	if (cs->data.conn_managed.in_crd_list != 0) {
		list_del(&(cs->data.conn_managed.crd_lh));
		cs->data.conn_managed.in_crd_list = 0;
		kref_put(&(cs->ref), kreffree_bug);
	}

out:
	mutex_unlock(&(cs->lock));
	mutex_unlock(&(rds_lock));
}

int __init cor_rd_init1(void)
{
	rdmsg_slab = kmem_cache_create("cor_rdmsg",
			sizeof(struct rd_msg), 8, 0, 0);
	if (unlikely(rdmsg_slab == 0))
		return -ENOMEM;

	return 0;
}

int __init cor_rd_init2(void)
{
	return proto_register(&cor_rd_proto, 1);
}