support hardened usercopy
cor.git: net/cor/sock_rdaemon.c
/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include "cor.h"

#define MAX_SND_MSGLEN 4096
#define MAX_MSG_LEN 256

struct cor_rd_msg{
	struct list_head lh;

	struct list_head cs_lh;
	struct cor_sock *cs;

	__u32 type;
	union{
	}msg;
};
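
/* State of the (single) routing daemon socket. Userspace-to-kernel commands
 * arrive as a stream of 8-byte headers (__u32 cmd, __u32 paramlen), each
 * followed by paramlen bytes of parameters; partially read headers and
 * parameters are kept in snd_cmdplen_read, param_read and cmdparams. */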
struct cor_rdsock{
	struct sock sk;

	atomic_t connected;

	__u8 versioninited;

	struct list_head socks;

	struct mutex sndbuf_lock;
	__u8 snd_cmdplen_read;
	__u32 param_read;
	char *cmdparams;

	atomic_t ready_to_read;
	struct list_head rcv_msgs; /* protected by rds_lock */

	struct mutex rcvbuf_lock;
	__u32 rcvbuflen;
	__u32 rcvbufoffset;
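
	/* The only region of this struct that is copied to/from userspace;
	 * it is whitelisted for hardened usercopy via the useroffset/usersize
	 * fields of cor_rd_proto below. */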
	struct{
		char snd_cmdplen_buf[8];
		char rcvbuf[MAX_MSG_LEN+8];
	}user_copy;
};

static struct kmem_cache *cor_rdmsg_slab;

static DEFINE_MUTEX(cor_rds_lock);
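/* the single rdaemon socket, if one is open (protected by cor_rds_lock) */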
static struct cor_rdsock *cor_crd = 0;

int cor_rd_socket_release(struct socket *sock)
{
	mutex_lock(&cor_rds_lock);

	BUG_ON(((struct cor_rdsock *) sock->sk) != cor_crd);

	cor_config_down();

	cor_set_interface_config(0, 0, 0);

	while (list_empty(&(cor_crd->rcv_msgs)) == 0) {
		struct cor_rd_msg *rdm = container_of(cor_crd->rcv_msgs.next,
				struct cor_rd_msg, lh);

		list_del(&(rdm->lh));
		if (rdm->cs != 0) {
			list_del(&(rdm->cs_lh));
			kref_put(&(rdm->cs->ref), cor_free_sock);
			rdm->cs = 0;
		}
		kmem_cache_free(cor_rdmsg_slab, rdm);
	}

	while (list_empty(&(cor_crd->socks)) == 0) {
		struct cor_sock *cs = container_of(cor_crd->socks.next,
				struct cor_sock, data.conn_managed.crd_lh);

		BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
		BUG_ON(cs->data.conn_managed.in_crd_list == 0);
		list_del(&(cs->data.conn_managed.crd_lh));
		cs->data.conn_managed.in_crd_list = 0;
		_cor_set_sock_connecterror(cs, -ENETUNREACH);
		kref_put(&(cs->ref), cor_free_sock);
	}

	if (cor_crd->cmdparams != 0) {
		kfree(cor_crd->cmdparams);
		cor_crd->cmdparams = 0;
	}

	cor_crd = 0;

	mutex_unlock(&cor_rds_lock);

	sock_put(sock->sk);

	return 0;
}

int cor_rd_socket_bind(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len)
{
	return -EOPNOTSUPP;
}

int cor_rd_socket_connect(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len, int flags)
{
	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	atomic_set(&(crd->connected), 1);

	lock_sock(sock->sk);
	sock->state = SS_CONNECTED;
	release_sock(sock->sk);
	return 0;
}

int cor_rd_socket_accept(struct socket *sock, struct socket *newsock, int flags,
		bool kern)
{
	return -EOPNOTSUPP;
}

int cor_rd_socket_listen(struct socket *sock, int len)
{
	return -EOPNOTSUPP;
}

int cor_rd_socket_shutdown(struct socket *sock, int flags)
{
	return 0;
}

int cor_rd_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int cor_rd_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	return -ENOPROTOOPT;
}

int cor_rd_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen)
{
	return -ENOPROTOOPT;
}
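
/* Parsing of userspace-to-kernel (UTK) commands. CRD_UTK_VERSION must be the
 * first command on a fresh socket; cor_rd_parse() rejects all other commands
 * until the version handshake has completed. */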
static int cor_rd_parse_version(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	int rc = 0;
	__u32 version;

	mutex_lock(&cor_rds_lock);

	BUG_ON(crd == 0);

	if (paramlen != 4)
		goto err;

	version = cor_parse_u32(param);
	if (version != 0)
		goto err;

	if (crd->versioninited != 0)
		goto err;

	crd->versioninited = 1;

	if (0) {
err:
		rc = 1;
	}
	mutex_unlock(&cor_rds_lock);

	return rc;
}

/* interface_config_lock must be held */
static int _cor_rd_parse_up_interfaces(struct cor_rdsock *crd, char *param,
		__u32 paramlen, __u32 *offset)
{
	__u32 num_intf;
	__u32 i;
	struct cor_interface_config *newconfig = 0;

	if (unlikely(*offset + 4 > paramlen))
		return 1;

	num_intf = cor_parse_u32(param + *offset);
	*offset += 4;

	if (unlikely(num_intf > 65536))
		return 1;

	newconfig = kmalloc(num_intf * sizeof(struct cor_interface_config),
			GFP_KERNEL);
	if (unlikely(newconfig == 0))
		return 1;

	memset(newconfig, 0, num_intf * sizeof(struct cor_interface_config));

	for (i=0;i<num_intf;i++) {
		struct cor_interface_config *newconfig_curr = &(newconfig[i]);

		if (unlikely(*offset + 4 > paramlen))
			goto out_err;

		newconfig_curr->name_len = cor_parse_u32(param + *offset);
		*offset += 4;

		/* reject names that would run past the end of the params */
		if (unlikely(*offset + newconfig_curr->name_len > paramlen))
			goto out_err;

		newconfig_curr->name = kmalloc(newconfig_curr->name_len,
				GFP_KERNEL);
		if (unlikely(newconfig_curr->name == 0))
			goto out_err;

		memcpy(newconfig_curr->name, param + *offset,
				newconfig_curr->name_len);
		*offset += newconfig_curr->name_len;
	}

	cor_set_interface_config(newconfig, num_intf, 0);

	return 0;

out_err:
	while (i>0) {
		struct cor_interface_config *newconfig_curr;

		i--;

		newconfig_curr = &(newconfig[i]);

		BUG_ON(newconfig_curr->name == 0);
		kfree(newconfig_curr->name);
		newconfig_curr->name = 0;
	}
	kfree(newconfig);
	return 1;
}
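
/* CRD_UTK_UP parameter layout, as parsed below: __u64 flags, __u32 addrlen,
 * addrlen bytes of address (at most 64), optionally followed by the interface
 * list when CRD_UTK_UP_FLAGS_INTERFACES is set in flags. */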
static int cor_rd_parse_up(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	__u32 offset = 0;

	__u64 flags;
	__u32 addrlen;
	char *addr = 0;

	if (unlikely(paramlen < 12))
		return 1;

	flags = cor_parse_u64(param);
	offset += 8;

	addrlen = cor_parse_u32(param + offset);
	offset += 4;

	if (addrlen > 0) {
		if (unlikely(unlikely(addrlen > 64) ||
				unlikely(offset + addrlen > paramlen)))
			return 1;
		addr = param + offset;
		offset += addrlen;
	}

	if ((flags & CRD_UTK_UP_FLAGS_INTERFACES) != 0) {
		if (_cor_rd_parse_up_interfaces(crd, param, paramlen, &offset)
				!= 0) {
			return 1;
		}
	} else {
		cor_set_interface_config(0, 0, 1);
	}

	if (cor_config_up(addr, addrlen) != 0)
		return 1;

	return 0;
}

static int cor_rd_parse_connecterror(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	__be64 cookie;
	__u32 error;
	int errorno;

	if (unlikely(paramlen < 12))
		return 1;

	cookie = cor_parse_be64(param);
	error = cor_parse_u32(param + 8);

	if (error == CRD_UTK_CONNECTERROR_ACCES) {
		errorno = -EACCES;
	} else if (error == CRD_UTK_CONNECTERROR_NETUNREACH) {
		errorno = -ENETUNREACH;
	} else if (error == CRD_UTK_CONNECTERROR_TIMEDOUT) {
		errorno = -ETIMEDOUT;
	} else if (error == CRD_UTK_CONNECTERROR_REFUSED) {
		errorno = -ECONNREFUSED;
	} else {
		errorno = -ENETUNREACH;
	}

	cor_set_sock_connecterror(cookie, errorno);

	return 0;
}

static int cor_rd_parse(struct cor_rdsock *crd, __u32 cmd, char *param,
		__u32 paramlen)
{
	if (unlikely(unlikely(cmd != CRD_UTK_VERSION) &&
			unlikely(crd->versioninited == 0)))
		return 1;

	if (cmd == CRD_UTK_VERSION) {
		return cor_rd_parse_version(crd, cmd, param, paramlen);
	} else if (cmd == CRD_UTK_UP) {
		return cor_rd_parse_up(crd, cmd, param, paramlen);
	} else if (cmd == CRD_UTK_CONNECTERROR) {
		return cor_rd_parse_connecterror(crd, cmd, param, paramlen);
	} else {
		return 1;
	}
}
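
/* sendmsg() framing: each command is an 8-byte header (cmd and paramlen, both
 * __u32) followed by paramlen parameter bytes (at most MAX_SND_MSGLEN). The
 * header is staged in user_copy.snd_cmdplen_buf so it may arrive split across
 * several sendmsg() calls. As an example, the version handshake is a single
 * command: cmd = CRD_UTK_VERSION, paramlen = 4, followed by a __u32 version
 * that must be 0, i.e. 12 bytes total. */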
static int _cor_rd_sendmsg_hdr(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	__u32 cpy;
	size_t st_rc;

	BUG_ON(len == 0);
	BUG_ON(len > (1024 * 1024 * 1024));

	BUG_ON(crd->snd_cmdplen_read > 8);
	cpy = (8 - crd->snd_cmdplen_read);
	if (unlikely(cpy > len))
		cpy = len;

	st_rc = copy_from_iter(crd->user_copy.snd_cmdplen_buf +
			crd->snd_cmdplen_read, cpy, &(msg->msg_iter));

	if (unlikely(st_rc != cpy))
		return -EFAULT;

	crd->snd_cmdplen_read += cpy;

	return cpy;
}

static int _cor_rd_sendmsg_body(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	__u32 cpy = 0;

	__u32 cmd;
	__u32 paramlen;

	BUG_ON(len == 0);
	BUG_ON(len > (1024 * 1024 * 1024));

	BUG_ON(crd->snd_cmdplen_read != 8);

	cmd = cor_parse_u32(crd->user_copy.snd_cmdplen_buf);
	paramlen = cor_parse_u32(crd->user_copy.snd_cmdplen_buf + 4);

	if (crd->cmdparams == 0 && paramlen != 0) {
		BUG_ON(crd->param_read != 0);
		if (unlikely(paramlen > MAX_SND_MSGLEN))
			return -ECONNRESET;

		crd->cmdparams = kmalloc(paramlen, GFP_KERNEL);
		if (unlikely(crd->cmdparams == 0))
			return -ENOMEM;
	}

	if (crd->param_read < paramlen) {
		size_t st_rc;

		cpy = (paramlen - crd->param_read);
		if (cpy > len)
			cpy = len;

		BUG_ON(crd->cmdparams == 0);

		st_rc = copy_from_iter(crd->cmdparams +
				crd->param_read, cpy, &(msg->msg_iter));

		if (unlikely(st_rc != cpy))
			return -EFAULT;

		crd->param_read += cpy;
	}

	BUG_ON(crd->param_read > paramlen);

	if (crd->param_read == paramlen) {
		int rc = cor_rd_parse(crd, cmd, crd->cmdparams, paramlen);
		if (unlikely(rc != 0))
			return -ECONNRESET;

		/* clear the whole 8-byte header buffer, not just one byte */
		memset(crd->user_copy.snd_cmdplen_buf, 0,
				sizeof(crd->user_copy.snd_cmdplen_buf));
		crd->snd_cmdplen_read = 0;
		crd->param_read = 0;
		kfree(crd->cmdparams);
		crd->cmdparams = 0;
	}

	return cpy;
}

static int _cor_rd_sendmsg(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	if (crd->snd_cmdplen_read < 8) {
		return _cor_rd_sendmsg_hdr(crd, msg, len);
	} else {
		return _cor_rd_sendmsg_body(crd, msg, len);
	}
}

int cor_rd_sendmsg(struct socket *sock, struct msghdr *msg, size_t total_len)
{
	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	int rc = 0;
	__u32 totalread = 0;
	__u32 currread = 0;

	__u32 len;

	if (unlikely(total_len > 1024 * 1024 * 1024))
		len = 1024 * 1024 * 1024;
	else
		len = (__u32) total_len;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return -ENOTCONN;

	mutex_lock(&(crd->sndbuf_lock));

	while (currread < len) {
		rc = _cor_rd_sendmsg(crd, msg, len - currread);
		if (unlikely(rc < 0))
			goto out;
		currread += rc;
		totalread += rc;
	}

out:
	mutex_unlock(&(crd->sndbuf_lock));

	if (rc >= 0 && totalread != 0) {
		BUG_ON(totalread > (1024 * 1024 * 1024));
		rc = totalread;
	}

	return rc;
}
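
/* recvmsg() path: queued cor_rd_msgs are serialized one at a time into
 * user_copy.rcvbuf, using cmd/length framing in the kernel-to-userspace
 * (KTU) direction as well. */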
static void cor_fill_msgbuf_supportedversions(struct cor_rdsock *crd,
		struct cor_rd_msg *rdm)
{
	BUG_ON(rdm->cs != 0);

	BUG_ON(MAX_MSG_LEN < 16);

	cor_put_u32(crd->user_copy.rcvbuf, CRD_KTU_SUPPORTEDVERSIONS);
	cor_put_u32(crd->user_copy.rcvbuf + 4, 8); /* len */
	cor_put_u32(crd->user_copy.rcvbuf + 8, 0);
	cor_put_u32(crd->user_copy.rcvbuf + 12, 0);

	crd->rcvbuflen = 16;
}

static void cor_fill_msgbuf_connect(struct cor_rdsock *crd,
		struct cor_rd_msg *rdm)
{
	char *remoteaddr;
	__u32 remoteaddr_len;

	BUG_ON(rdm->cs == 0);
	mutex_lock(&(rdm->cs->lock));
	BUG_ON(rdm->cs->type != CS_TYPE_CONN_MANAGED);

	remoteaddr = (char *) &(rdm->cs->data.conn_managed.remoteaddr);
	remoteaddr_len = sizeof(struct cor_sockaddr);
	BUG_ON(remoteaddr_len != 72);

	BUG_ON(MAX_MSG_LEN < (20 + remoteaddr_len));

	cor_put_u32(crd->user_copy.rcvbuf, CRD_KTU_CONNECT);
	cor_put_u32(crd->user_copy.rcvbuf + 4, 12 + remoteaddr_len);
	cor_put_be64(crd->user_copy.rcvbuf + 8,
			rdm->cs->data.conn_managed.cookie);
	memcpy(crd->user_copy.rcvbuf + 16, remoteaddr, remoteaddr_len);
	cor_put_u32(crd->user_copy.rcvbuf + 16 + remoteaddr_len,
			rdm->cs->is_highlatency ?
			COR_TOS_HIGH_LATENCY : COR_TOS_LOW_LATENCY);

	crd->rcvbuflen = 20 + remoteaddr_len;
	mutex_unlock(&(rdm->cs->lock));
}

static void _cor_fill_msgbuf(struct cor_rdsock *crd, struct cor_rd_msg *rdm)
{
	if (rdm->type == CRD_KTU_SUPPORTEDVERSIONS) {
		cor_fill_msgbuf_supportedversions(crd, rdm);
	} else if (rdm->type == CRD_KTU_CONNECT) {
		cor_fill_msgbuf_connect(crd, rdm);
	} else {
		BUG();
	}
}

static int cor_fill_msgbuf(struct socket *sock, struct cor_rdsock *crd,
		int blocking)
{
	int rc = 0;
	struct cor_rd_msg *rdm = 0;

	while(1) {
		mutex_lock(&cor_rds_lock);
		if (list_empty(&(crd->rcv_msgs)) == 0)
			break;
		atomic_set(&(crd->ready_to_read), 0);
		mutex_unlock(&cor_rds_lock);

		if (blocking == 0)
			return -EAGAIN;

		if (wait_event_interruptible(*sk_sleep(sock->sk),
				atomic_read(&(crd->ready_to_read)) != 0) != 0)
			return -ERESTARTSYS;
	}

	rdm = container_of(crd->rcv_msgs.next, struct cor_rd_msg, lh);
	list_del(&(rdm->lh));

	if (rdm->cs != 0)
		list_del(&(rdm->cs_lh));

	mutex_unlock(&cor_rds_lock);

	memset(crd->user_copy.rcvbuf, 0, sizeof(crd->user_copy.rcvbuf));
	crd->rcvbuflen = 0;
	crd->rcvbufoffset = 0;

	_cor_fill_msgbuf(crd, rdm);

	if (rdm->cs != 0) {
		kref_put(&(rdm->cs->ref), cor_free_sock);
		rdm->cs = 0;
	}

	kmem_cache_free(cor_rdmsg_slab, rdm);

	return rc;
}

int cor_rd_recvmsg(struct socket *sock, struct msghdr *msg, size_t total_len,
		int flags)
{
	int copied = 0;
	int blocking = (flags & MSG_DONTWAIT) == 0;

	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	__u32 totallen;

	if (unlikely(total_len > 1024 * 1024 * 1024))
		totallen = 1024 * 1024 * 1024;
	else
		totallen = (__u32) total_len;

	if (unlikely((flags & MSG_PEEK) != 0))
		return -EINVAL;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return -ENOTCONN;

	mutex_lock(&(crd->rcvbuf_lock));
	while (copied < totallen) {
		__u32 len = totallen - copied;
		size_t st_rc;

		if (crd->rcvbufoffset == crd->rcvbuflen) {
			int rc = cor_fill_msgbuf(sock, crd,
					blocking && copied == 0);
			if (rc != 0 && copied == 0)
				copied = rc;
			if (rc != 0)
				break;
		}

		BUG_ON(crd->rcvbufoffset > crd->rcvbuflen);

		if (len > (crd->rcvbuflen - crd->rcvbufoffset))
			len = crd->rcvbuflen - crd->rcvbufoffset;

		st_rc = copy_to_iter(crd->user_copy.rcvbuf + crd->rcvbufoffset,
				len, &(msg->msg_iter));

		if (unlikely(st_rc != len)) {
			copied = -EFAULT;
			break;
		}

		copied += len;
		crd->rcvbufoffset += len;
	}
	mutex_unlock(&(crd->rcvbuf_lock));

	BUG_ON(copied > 0 && unlikely((copied > total_len ||
			copied > totallen)));

	return copied;
}

static unsigned int cor_rd_poll(struct file *file, struct socket *sock,
		poll_table *wait)
{
	unsigned int mask = 0;

	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return 0;

	sock_poll_wait(file, sock, wait);

	mutex_lock(&(crd->rcvbuf_lock));
	mutex_lock(&cor_rds_lock);

	if (crd->rcvbufoffset != crd->rcvbuflen ||
			(list_empty(&(crd->rcv_msgs)) == 0))
		mask |= (POLLIN | POLLRDNORM);

	mutex_unlock(&cor_rds_lock);
	mutex_unlock(&(crd->rcvbuf_lock));

	mask |= (POLLOUT | POLLWRNORM);

	return mask;
}
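
/* useroffset/usersize whitelist the user_copy member for hardened usercopy
 * (CONFIG_HARDENED_USERCOPY): the copy_from_iter()/copy_to_iter() calls above
 * only ever touch that slice of struct cor_rdsock. */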
struct proto cor_rd_proto = {
	.name = "cor_rd",
	.obj_size = sizeof(struct cor_rdsock),
	.useroffset = offsetof(struct cor_rdsock, user_copy),
	.usersize = sizeof(((struct cor_rdsock *) 0)->user_copy),
	.owner = THIS_MODULE,
};

const struct proto_ops cor_rd_proto_ops = {
	.family = PF_COR,
	.owner = THIS_MODULE,
	.release = cor_rd_socket_release,
	.bind = cor_rd_socket_bind,
	.connect = cor_rd_socket_connect,
	.accept = cor_rd_socket_accept,
	.listen = cor_rd_socket_listen,
	.shutdown = cor_rd_socket_shutdown,
	.ioctl = cor_rd_ioctl,
	.setsockopt = cor_rd_setsockopt,
	.getsockopt = cor_rd_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cor_rd_ioctl,
	.compat_setsockopt = cor_rd_setsockopt,
	.compat_getsockopt = cor_rd_getsockopt,
#endif
	.sendmsg = cor_rd_sendmsg,
	.recvmsg = cor_rd_recvmsg,
	.poll = cor_rd_poll,

	.socketpair = cor_socket_socketpair,
	.getname = cor_socket_getname,
	.mmap = cor_socket_mmap,

	/* sendpage, splice_read are optional */
};
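
/* Daemon-side usage, as a rough sketch (the socket type constant and the uapi
 * header defining PF_COR and the CRD_* commands live outside this file; the
 * sequence below is illustrative, not verified):
 *
 *	fd = socket(PF_COR, ..., 0);
 *	connect(fd, ..., ...);         // address is ignored; marks socket up
 *	read(fd, buf, ...);            // first msg: CRD_KTU_SUPPORTEDVERSIONS
 *	write(fd, version_cmd, 12);    // CRD_UTK_VERSION, paramlen 4, version 0
 *	write(fd, up_cmd, ...);        // CRD_UTK_UP with flags/addr/interfaces
 */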
int cor_create_rdaemon_sock(struct net *net, struct socket *sock, int protocol,
		int kern)
{
	struct cor_rd_msg *rdm = 0;
	struct cor_rdsock *newcrd = 0;

	rdm = kmem_cache_alloc(cor_rdmsg_slab, GFP_KERNEL);
	if (unlikely(rdm == 0))
		return -ENOMEM;

	newcrd = (struct cor_rdsock *) sk_alloc(net, PF_COR, GFP_KERNEL,
			&cor_rd_proto, kern);
	if (unlikely(newcrd == 0)) {
		kmem_cache_free(cor_rdmsg_slab, rdm);
		return -ENOMEM;
	}

	sock_init_data(sock, (struct sock *) newcrd);
	newcrd->sk.sk_protocol = protocol;
	memset(((char *)newcrd) + sizeof(struct sock), 0,
			sizeof(struct cor_rdsock) - sizeof(struct sock));

	atomic_set(&(newcrd->connected), 0);
	INIT_LIST_HEAD(&(newcrd->socks));
	mutex_init(&(newcrd->sndbuf_lock));
	mutex_init(&(newcrd->rcvbuf_lock));
	atomic_set(&(newcrd->ready_to_read), 0);
	INIT_LIST_HEAD(&(newcrd->rcv_msgs));

	mutex_lock(&cor_rds_lock);
	if (cor_crd != 0) {
		sock_put((struct sock *) newcrd);
		mutex_unlock(&cor_rds_lock);
		kmem_cache_free(cor_rdmsg_slab, rdm);
		return -EACCES;
	}
	cor_crd = newcrd;

	memset(rdm, 0, sizeof(struct cor_rd_msg));
	rdm->type = CRD_KTU_SUPPORTEDVERSIONS;
	list_add_tail(&(rdm->lh), &(cor_crd->rcv_msgs));

	atomic_set(&(newcrd->ready_to_read), 1);

	mutex_unlock(&cor_rds_lock);

	sock->state = SS_UNCONNECTED;
	sock->ops = &cor_rd_proto_ops;
	sock->sk = (struct sock *) cor_crd;

	return 0;
}
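
/* Queue a CRD_KTU_CONNECT message for a conn-managed socket and wake the
 * daemon; returns -EINPROGRESS on success. The daemon reports failures
 * asynchronously via CRD_UTK_CONNECTERROR (see cor_rd_parse_connecterror()). */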
int cor_rdreq_connect(struct cor_sock *cs)
{
	int rc;

	struct cor_rd_msg *rdm = kmem_cache_alloc(cor_rdmsg_slab, GFP_KERNEL);

	mutex_lock(&cor_rds_lock);
	mutex_lock(&(cs->lock));

	BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
	BUG_ON(cs->data.conn_managed.cookie == 0);

	if (unlikely(cor_crd == 0 || atomic_read(&(cor_crd->connected)) == 0 ||
			cor_crd->versioninited == 0)) {
		rc = -ENETUNREACH;
		goto out_freerdm;
	}

	if (unlikely(rdm == 0)) {
		rc = -ETIMEDOUT;
		goto out;
	}

	memset(rdm, 0, sizeof(struct cor_rd_msg));

	kref_get(&(cs->ref));
	list_add_tail(&(rdm->cs_lh), &(cs->data.conn_managed.rd_msgs));
	rdm->cs = cs;
	rdm->type = CRD_KTU_CONNECT;

	if (list_empty(&(cor_crd->rcv_msgs))) {
		atomic_set(&(cor_crd->ready_to_read), 1);
		barrier();
		cor_crd->sk.sk_data_ready(&(cor_crd->sk));
	}
	list_add_tail(&(rdm->lh), &(cor_crd->rcv_msgs));
	kref_get(&(cs->ref));

	kref_get(&(cs->ref));
	list_add_tail(&(cs->data.conn_managed.crd_lh), &(cor_crd->socks));
	cs->data.conn_managed.in_crd_list = 1;

	rc = -EINPROGRESS;

	if (0) {
out_freerdm:
		/* do not leak the preallocated msg if the rdaemon is gone */
		if (rdm != 0)
			kmem_cache_free(cor_rdmsg_slab, rdm);
	}
out:
	mutex_unlock(&(cs->lock));
	mutex_unlock(&cor_rds_lock);

	return rc;
}

void cor_usersock_release(struct cor_sock *cs)
{
	mutex_lock(&cor_rds_lock);
	mutex_lock(&(cs->lock));

	if (cs->type != CS_TYPE_CONN_MANAGED)
		goto out;

	while (list_empty(&(cs->data.conn_managed.rd_msgs)) == 0) {
		struct cor_rd_msg *rdm = container_of(
				cs->data.conn_managed.rd_msgs.next,
				struct cor_rd_msg, cs_lh);

		list_del(&(rdm->lh));
		BUG_ON(rdm->cs != cs);
		list_del(&(rdm->cs_lh));
		kref_put(&(cs->ref), cor_kreffree_bug);
		rdm->cs = 0;
		kmem_cache_free(cor_rdmsg_slab, rdm);
	}

	if (cs->data.conn_managed.in_crd_list != 0) {
		list_del(&(cs->data.conn_managed.crd_lh));
		cs->data.conn_managed.in_crd_list = 0;
		kref_put(&(cs->ref), cor_kreffree_bug);
	}

out:
	mutex_unlock(&(cs->lock));
	mutex_unlock(&cor_rds_lock);
}

int __init cor_rd_init1(void)
{
	cor_rdmsg_slab = kmem_cache_create("cor_rdmsg",
			sizeof(struct cor_rd_msg), 8, 0, 0);
	if (unlikely(cor_rdmsg_slab == 0))
		return -ENOMEM;

	return 0;
}

int __init cor_rd_init2(void)
{
	return proto_register(&cor_rd_proto, 1);
}

void __exit cor_rd_exit1(void)
{
	proto_unregister(&cor_rd_proto);
}

void __exit cor_rd_exit2(void)
{
	kmem_cache_destroy(cor_rdmsg_slab);
	cor_rdmsg_slab = 0;
}

MODULE_LICENSE("GPL");