/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "cor.h"

#define MAX_SND_MSGLEN 4096
#define MAX_MSG_LEN 256
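
/*
 * A message queued from the kernel to the userspace routing daemon
 * (one of the CRD_KTU_* types). If the message refers to a managed
 * socket, cs holds a reference to it and cs_lh additionally links the
 * message into that socket's own message list.
 */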
struct cor_rd_msg {
	struct list_head lh;

	struct list_head cs_lh;
	struct cor_sock *cs;

	__u32 type;
	union {
	} msg;
};
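
/*
 * State of the routing daemon socket; only one such socket may exist
 * at a time (see cor_crd below). The user_copy struct groups the
 * buffers that are copied to/from userspace, so that the proto's
 * useroffset/usersize fields can whitelist exactly this region for
 * hardened usercopy.
 */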
struct cor_rdsock {
	struct sock sk;

	atomic_t connected;

	__u8 versioninited;

	struct list_head socks;

	struct mutex sndbuf_lock;
	__u8 snd_cmdplen_read;
	__u32 param_read;
	char *cmdparams;

	atomic_t ready_to_read;
	struct list_head rcv_msgs; /* protected by rds_lock */

	struct mutex rcvbuf_lock;
	__u32 rcvbuflen;
	__u32 rcvbufoffset;

	struct {
		char snd_cmdplen_buf[8];
		char rcvbuf[MAX_MSG_LEN + 8];
	} user_copy;
};

static struct kmem_cache *cor_rdmsg_slab;

static DEFINE_MUTEX(cor_rds_lock);
static struct cor_rdsock *cor_crd;
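
/*
 * Called when the routing daemon closes its socket: bring the
 * configuration down, discard all queued messages and report
 * ENETUNREACH to every socket still waiting for a connect result.
 */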
int cor_rd_socket_release(struct socket *sock)
{
	mutex_lock(&cor_rds_lock);

	BUG_ON(((struct cor_rdsock *) sock->sk) != cor_crd);

	cor_config_down();

	cor_set_interface_config(0, 0, 0);

	while (list_empty(&(cor_crd->rcv_msgs)) == 0) {
		struct cor_rd_msg *rdm = container_of(cor_crd->rcv_msgs.next,
				struct cor_rd_msg, lh);

		list_del(&(rdm->lh));
		if (rdm->cs != 0) {
			list_del(&(rdm->cs_lh));
			kref_put(&(rdm->cs->ref), cor_free_sock);
			rdm->cs = 0;
		}
		kmem_cache_free(cor_rdmsg_slab, rdm);
	}

	while (list_empty(&(cor_crd->socks)) == 0) {
		struct cor_sock *cs = container_of(cor_crd->socks.next,
				struct cor_sock, data.conn_managed.crd_lh);

		BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
		BUG_ON(cs->data.conn_managed.in_crd_list == 0);
		list_del(&(cs->data.conn_managed.crd_lh));
		cs->data.conn_managed.in_crd_list = 0;
		_cor_set_sock_connecterror(cs, ENETUNREACH);
		kref_put(&(cs->ref), cor_free_sock);
	}

	if (cor_crd->cmdparams != 0) {
		kfree(cor_crd->cmdparams);
		cor_crd->cmdparams = 0;
	}

	cor_crd = 0;

	mutex_unlock(&cor_rds_lock);

	sock_put(sock->sk);

	return 0;
}

int cor_rd_socket_bind(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len)
{
	return -EOPNOTSUPP;
}

int cor_rd_socket_connect(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len, int flags)
{
	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	atomic_set(&(crd->connected), 1);

	lock_sock(sock->sk);
	sock->state = SS_CONNECTED;
	release_sock(sock->sk);
	return 0;
}

int cor_rd_socket_accept(struct socket *sock, struct socket *newsock,
		int flags, bool kern)
{
	return -EOPNOTSUPP;
}

int cor_rd_socket_listen(struct socket *sock, int len)
{
	return -EOPNOTSUPP;
}

int cor_rd_socket_shutdown(struct socket *sock, int flags)
{
	return 0;
}

int cor_rd_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int cor_rd_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	return -ENOPROTOOPT;
}

int cor_rd_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen)
{
	return -ENOPROTOOPT;
}

static int cor_rd_parse_version(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	int rc = 0;
	__u32 version;

	mutex_lock(&cor_rds_lock);

	BUG_ON(crd == 0);

	if (paramlen != 4)
		goto err;

	version = cor_parse_u32(param);
	if (version != 0)
		goto err;

	if (crd->versioninited != 0)
		goto err;

	crd->versioninited = 1;

	if (0) {
err:
		rc = 1;
	}
	mutex_unlock(&cor_rds_lock);

	return rc;
}
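
/*
 * Wire format of the interface list in an "up" command: a u32
 * interface count, then for each interface a u32 name length followed
 * by that many name bytes.
 */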
/* interface_config_lock must be held */
static int _cor_rd_parse_up_interfaces(struct cor_rdsock *crd, char *param,
		__u32 paramlen, __u32 *offset)
{
	__u32 num_intf;
	__u32 i;
	struct cor_interface_config *newconfig = 0;

	if (unlikely(*offset + 4 > paramlen))
		return 1;

	num_intf = cor_parse_u32(param + *offset);
	*offset += 4;

	if (unlikely(num_intf > 65536))
		return 1;

	newconfig = kmalloc(num_intf * sizeof(struct cor_interface_config),
			GFP_KERNEL);
	if (unlikely(newconfig == 0))
		return 1;

	memset(newconfig, 0, num_intf * sizeof(struct cor_interface_config));

	for (i = 0; i < num_intf; i++) {
		struct cor_interface_config *newconfig_curr = &(newconfig[i]);

		if (unlikely(*offset + 4 > paramlen))
			goto out_err;

		newconfig_curr->name_len = cor_parse_u32(param + *offset);
		*offset += 4;

		/* error out if the name would run past the end of params */
		if (unlikely(*offset + newconfig_curr->name_len > paramlen))
			goto out_err;

		newconfig_curr->name = kmalloc(newconfig_curr->name_len,
				GFP_KERNEL);
		if (unlikely(newconfig_curr->name == 0))
			goto out_err;

		memcpy(newconfig_curr->name, param + *offset,
				newconfig_curr->name_len);
		*offset += newconfig_curr->name_len;
	}

	cor_set_interface_config(newconfig, num_intf, 0);

	return 0;

out_err:
	while (i > 0) {
		struct cor_interface_config *newconfig_curr;

		i--;

		newconfig_curr = &(newconfig[i]);

		BUG_ON(newconfig_curr->name == 0);
		kfree(newconfig_curr->name);
		newconfig_curr->name = 0;
	}
	kfree(newconfig);
	return 1;
}

static int cor_rd_parse_up(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	__u32 offset = 0;

	__u64 flags;
	__u8 has_addr = 0;
	__be64 addr = 0;

	if (unlikely(paramlen < 8))
		return 1;

	flags = cor_parse_u64(param);
	offset += 8;

	if ((flags & CRD_UTK_UP_FLAGS_ADDR) != 0) {
		if (unlikely(paramlen - offset < 8))
			return 1;

		has_addr = 1;
		addr = cor_parse_be64(param + offset);
		offset += 8;
	}

	if ((flags & CRD_UTK_UP_FLAGS_INTERFACES) != 0) {
		if (_cor_rd_parse_up_interfaces(crd, param, paramlen, &offset)
				!= 0) {
			return 1;
		}
	} else {
		cor_set_interface_config(0, 0, 1);
	}

	if (cor_config_up(has_addr, addr) != 0)
		return 1;

	return 0;
}
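
/*
 * CRD_UTK_CONNECTERROR carries an 8 byte cookie identifying the
 * connecting socket plus a u32 error code, which is mapped to an
 * errno here; unknown codes fall back to ENETUNREACH.
 */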
static int cor_rd_parse_connecterror(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	__be64 cookie;
	__u32 error;
	int errorno;

	if (unlikely(paramlen < 12))
		return 1;

	cookie = cor_parse_be64(param);
	error = cor_parse_u32(param + 8);

	if (error == CRD_UTK_CONNECTERROR_ACCES) {
		errorno = EACCES;
	} else if (error == CRD_UTK_CONNECTERROR_NETUNREACH) {
		errorno = ENETUNREACH;
	} else if (error == CRD_UTK_CONNECTERROR_TIMEDOUT) {
		errorno = ETIMEDOUT;
	} else if (error == CRD_UTK_CONNECTERROR_REFUSED) {
		errorno = ECONNREFUSED;
	} else {
		errorno = ENETUNREACH;
	}

	cor_set_sock_connecterror(cookie, errorno);

	return 0;
}

static int cor_rd_parse(struct cor_rdsock *crd, __u32 cmd, char *param,
		__u32 paramlen)
{
	if (unlikely(unlikely(cmd != CRD_UTK_VERSION) &&
			unlikely(crd->versioninited == 0)))
		return 1;

	if (cmd == CRD_UTK_VERSION) {
		return cor_rd_parse_version(crd, cmd, param, paramlen);
	} else if (cmd == CRD_UTK_UP) {
		return cor_rd_parse_up(crd, cmd, param, paramlen);
	} else if (cmd == CRD_UTK_CONNECTERROR) {
		return cor_rd_parse_connecterror(crd, cmd, param, paramlen);
	} else {
		return 1;
	}
}
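
/*
 * Commands sent by the daemon are framed as an 8 byte header (u32
 * cmd, u32 paramlen) followed by paramlen bytes of parameters. The
 * header is accumulated in user_copy.snd_cmdplen_buf, so it may be
 * split across multiple sendmsg calls.
 */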
static int _cor_rd_sendmsg_hdr(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	__u32 cpy;
	size_t st_rc;

	BUG_ON(len == 0);
	BUG_ON(len > (1024 * 1024 * 1024));

	BUG_ON(crd->snd_cmdplen_read > 8);
	cpy = (8 - crd->snd_cmdplen_read);
	if (unlikely(cpy > len))
		cpy = len;

	st_rc = copy_from_iter(crd->user_copy.snd_cmdplen_buf +
			crd->snd_cmdplen_read, cpy, &(msg->msg_iter));

	if (unlikely(st_rc != cpy))
		return -EFAULT;

	crd->snd_cmdplen_read += cpy;

	return cpy;
}

static int _cor_rd_sendmsg_body(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	__u32 cpy = 0;

	__u32 cmd;
	__u32 paramlen;

	BUG_ON(len == 0);
	BUG_ON(len > (1024 * 1024 * 1024));

	BUG_ON(crd->snd_cmdplen_read != 8);

	cmd = cor_parse_u32(crd->user_copy.snd_cmdplen_buf);
	paramlen = cor_parse_u32(crd->user_copy.snd_cmdplen_buf + 4);

	if (crd->cmdparams == 0 && paramlen != 0) {
		BUG_ON(crd->param_read != 0);
		if (unlikely(paramlen > MAX_SND_MSGLEN))
			return -ECONNRESET;

		crd->cmdparams = kmalloc(paramlen, GFP_KERNEL);
		if (unlikely(crd->cmdparams == 0))
			return -ENOMEM;
	}

	if (crd->param_read < paramlen) {
		size_t st_rc;

		cpy = (paramlen - crd->param_read);
		if (cpy > len)
			cpy = len;

		BUG_ON(crd->cmdparams == 0);

		st_rc = copy_from_iter(crd->cmdparams +
				crd->param_read, cpy, &(msg->msg_iter));

		if (unlikely(st_rc != cpy))
			return -EFAULT;

		crd->param_read += cpy;
	}

	BUG_ON(crd->param_read > paramlen);

	if (crd->param_read == paramlen) {
		int rc = cor_rd_parse(crd, cmd, crd->cmdparams, paramlen);

		if (unlikely(rc != 0))
			return -ECONNRESET;

		memset(crd->user_copy.snd_cmdplen_buf, 0,
				sizeof(crd->user_copy.snd_cmdplen_buf));
		crd->snd_cmdplen_read = 0;
		crd->param_read = 0;
		kfree(crd->cmdparams);
		crd->cmdparams = 0;
	}

	return cpy;
}

static int _cor_rd_sendmsg(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	if (crd->snd_cmdplen_read < 8) {
		return _cor_rd_sendmsg_hdr(crd, msg, len);
	} else {
		return _cor_rd_sendmsg_body(crd, msg, len);
	}
}

int cor_rd_sendmsg(struct socket *sock, struct msghdr *msg, size_t total_len)
{
	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	int rc = 0;
	__u32 totalread = 0;
	__u32 currread = 0;

	__u32 len;

	if (unlikely(total_len > 1024 * 1024 * 1024))
		len = 1024 * 1024 * 1024;
	else
		len = (__u32) total_len;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return -ENOTCONN;

	mutex_lock(&(crd->sndbuf_lock));

	while (currread < len) {
		rc = _cor_rd_sendmsg(crd, msg, len - currread);
		if (unlikely(rc < 0))
			goto out;
		currread += rc;
		totalread += rc;
	}

out:
	mutex_unlock(&(crd->sndbuf_lock));

	if (rc >= 0 && totalread != 0) {
		BUG_ON(totalread > (1024 * 1024 * 1024));
		rc = totalread;
	}

	return rc;
}
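
/*
 * Kernel-to-daemon messages are rendered into user_copy.rcvbuf with
 * the same framing the daemon uses for its commands: u32 type, u32
 * payload length, then the payload.
 */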
static void cor_fill_msgbuf_supportedversions(struct cor_rdsock *crd,
		struct cor_rd_msg *rdm)
{
	BUG_ON(rdm->cs != 0);

	BUG_ON(MAX_MSG_LEN < 16);

	cor_put_u32(crd->user_copy.rcvbuf, CRD_KTU_SUPPORTEDVERSIONS);
	cor_put_u32(crd->user_copy.rcvbuf + 4, 8); /* len */
	cor_put_u32(crd->user_copy.rcvbuf + 8, 0);
	cor_put_u32(crd->user_copy.rcvbuf + 12, 0);

	crd->rcvbuflen = 16;
}

static void cor_fill_msgbuf_connect(struct cor_rdsock *crd,
		struct cor_rd_msg *rdm)
{
	char *remoteaddr;
	__u32 remoteaddr_len;

	BUG_ON(rdm->cs == 0);
	mutex_lock(&(rdm->cs->lock));
	BUG_ON(rdm->cs->type != CS_TYPE_CONN_MANAGED);

	remoteaddr = (char *) &(rdm->cs->data.conn_managed.remoteaddr);
	remoteaddr_len = sizeof(struct cor_sockaddr);
	BUILD_BUG_ON(sizeof(struct cor_sockaddr) != 16);

	BUG_ON(MAX_MSG_LEN < (20 + remoteaddr_len));

	cor_put_u32(crd->user_copy.rcvbuf, CRD_KTU_CONNECT);
	cor_put_u32(crd->user_copy.rcvbuf + 4, 12 + remoteaddr_len);
	cor_put_be64(crd->user_copy.rcvbuf + 8,
			rdm->cs->data.conn_managed.cookie);
	memcpy(crd->user_copy.rcvbuf + 16, remoteaddr, remoteaddr_len);
	cor_put_u32(crd->user_copy.rcvbuf + 16 + remoteaddr_len,
			rdm->cs->is_highlatency ?
			COR_TOS_HIGH_LATENCY : COR_TOS_LOW_LATENCY);

	crd->rcvbuflen = 20 + remoteaddr_len;
	mutex_unlock(&(rdm->cs->lock));
}

static void _cor_fill_msgbuf(struct cor_rdsock *crd, struct cor_rd_msg *rdm)
{
	if (rdm->type == CRD_KTU_SUPPORTEDVERSIONS) {
		cor_fill_msgbuf_supportedversions(crd, rdm);
	} else if (rdm->type == CRD_KTU_CONNECT) {
		cor_fill_msgbuf_connect(crd, rdm);
	} else {
		BUG();
	}
}
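
/*
 * Dequeues the next queued message, blocking if requested, and
 * renders it into rcvbuf. Returns -EAGAIN (non-blocking, queue empty)
 * or -ERESTARTSYS (interrupted) without dequeueing anything.
 */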
static int cor_fill_msgbuf(struct socket *sock, struct cor_rdsock *crd,
		int blocking)
{
	int rc = 0;
	struct cor_rd_msg *rdm = 0;

	while (1) {
		mutex_lock(&cor_rds_lock);
		if (list_empty(&(crd->rcv_msgs)) == 0)
			break;
		atomic_set(&(crd->ready_to_read), 0);
		mutex_unlock(&cor_rds_lock);

		if (blocking == 0)
			return -EAGAIN;

		if (wait_event_interruptible(*sk_sleep(sock->sk),
				atomic_read(&(crd->ready_to_read)) != 0) != 0)
			return -ERESTARTSYS;
	}

	rdm = container_of(crd->rcv_msgs.next, struct cor_rd_msg, lh);
	list_del(&(rdm->lh));

	if (rdm->cs != 0)
		list_del(&(rdm->cs_lh));

	mutex_unlock(&cor_rds_lock);

	memset(crd->user_copy.rcvbuf, 0, sizeof(crd->user_copy.rcvbuf));
	crd->rcvbuflen = 0;
	crd->rcvbufoffset = 0;

	_cor_fill_msgbuf(crd, rdm);

	if (rdm->cs != 0) {
		kref_put(&(rdm->cs->ref), cor_free_sock);
		rdm->cs = 0;
	}

	kmem_cache_free(cor_rdmsg_slab, rdm);

	return rc;
}

int cor_rd_recvmsg(struct socket *sock, struct msghdr *msg, size_t total_len,
		int flags)
{
	int copied = 0;
	int blocking = (flags & MSG_DONTWAIT) == 0;

	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	__u32 totallen;

	if (unlikely(total_len > 1024 * 1024 * 1024))
		totallen = 1024 * 1024 * 1024;
	else
		totallen = (__u32) total_len;

	if (unlikely((flags & MSG_PEEK) != 0))
		return -EINVAL;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return -ENOTCONN;

	mutex_lock(&(crd->rcvbuf_lock));
	while (copied < totallen) {
		__u32 len = totallen - copied;
		size_t st_rc;

		if (crd->rcvbufoffset == crd->rcvbuflen) {
			int rc = cor_fill_msgbuf(sock, crd,
					blocking && copied == 0);

			if (rc != 0 && copied == 0)
				copied = rc;
			if (rc != 0)
				break;
		}

		BUG_ON(crd->rcvbufoffset > crd->rcvbuflen);

		if (len > (crd->rcvbuflen - crd->rcvbufoffset))
			len = crd->rcvbuflen - crd->rcvbufoffset;

		st_rc = copy_to_iter(crd->user_copy.rcvbuf + crd->rcvbufoffset,
				len, &(msg->msg_iter));

		if (unlikely(st_rc != len)) {
			copied = -EFAULT;
			break;
		}

		copied += len;
		crd->rcvbufoffset += len;
	}
	mutex_unlock(&(crd->rcvbuf_lock));

	BUG_ON(copied > 0 && unlikely((copied > total_len ||
			copied > totallen)));

	return copied;
}

static unsigned int cor_rd_poll(struct file *file, struct socket *sock,
		poll_table *wait)
{
	unsigned int mask = 0;

	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return 0;

	sock_poll_wait(file, sock, wait);

	mutex_lock(&(crd->rcvbuf_lock));
	mutex_lock(&cor_rds_lock);

	if (crd->rcvbufoffset != crd->rcvbuflen ||
			(list_empty(&(crd->rcv_msgs)) == 0))
		mask |= (POLLIN | POLLRDNORM);

	mutex_unlock(&cor_rds_lock);
	mutex_unlock(&(crd->rcvbuf_lock));

	mask |= (POLLOUT | POLLWRNORM);

	return mask;
}

struct proto cor_rd_proto = {
	.name = "cor_rd",
	.obj_size = sizeof(struct cor_rdsock),
	.useroffset = offsetof(struct cor_rdsock, user_copy),
	.usersize = sizeof(((struct cor_rdsock *) 0)->user_copy),
	.owner = THIS_MODULE,
};

const struct proto_ops cor_rd_proto_ops = {
	.family = PF_COR,
	.owner = THIS_MODULE,
	.release = cor_rd_socket_release,
	.bind = cor_rd_socket_bind,
	.connect = cor_rd_socket_connect,
	.accept = cor_rd_socket_accept,
	.listen = cor_rd_socket_listen,
	.shutdown = cor_rd_socket_shutdown,
	.ioctl = cor_rd_ioctl,
	.setsockopt = cor_rd_setsockopt,
	.getsockopt = cor_rd_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cor_rd_ioctl,
	.compat_setsockopt = cor_rd_setsockopt,
	.compat_getsockopt = cor_rd_getsockopt,
#endif
	.sendmsg = cor_rd_sendmsg,
	.recvmsg = cor_rd_recvmsg,
	.poll = cor_rd_poll,

	.socketpair = cor_socket_socketpair,
	.getname = cor_socket_getname,
	.mmap = cor_socket_mmap,

	/* sendpage and splice_read are optional */
};
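
/*
 * Creates the routing daemon socket. Only one may exist at a time;
 * a CRD_KTU_SUPPORTEDVERSIONS message is queued as the first message
 * the daemon will read.
 */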
int cor_create_rdaemon_sock(struct net *net, struct socket *sock, int protocol,
		int kern)
{
	struct cor_rd_msg *rdm = 0;
	struct cor_rdsock *newcrd = 0;

	rdm = kmem_cache_alloc(cor_rdmsg_slab, GFP_KERNEL);
	if (unlikely(rdm == 0))
		return -ENOMEM;

	newcrd = (struct cor_rdsock *) sk_alloc(net, PF_COR, GFP_KERNEL,
			&cor_rd_proto, kern);
	if (unlikely(newcrd == 0)) {
		kmem_cache_free(cor_rdmsg_slab, rdm);
		return -ENOMEM;
	}

	sock_init_data(sock, (struct sock *) newcrd);
	newcrd->sk.sk_protocol = protocol;
	memset(((char *) newcrd) + sizeof(struct sock), 0,
			sizeof(struct cor_rdsock) - sizeof(struct sock));

	atomic_set(&(newcrd->connected), 0);
	INIT_LIST_HEAD(&(newcrd->socks));
	mutex_init(&(newcrd->sndbuf_lock));
	mutex_init(&(newcrd->rcvbuf_lock));
	atomic_set(&(newcrd->ready_to_read), 0);
	INIT_LIST_HEAD(&(newcrd->rcv_msgs));

	mutex_lock(&cor_rds_lock);
	if (cor_crd != 0) {
		sock_put((struct sock *) newcrd);
		mutex_unlock(&cor_rds_lock);
		kmem_cache_free(cor_rdmsg_slab, rdm);
		return -EACCES;
	}
	cor_crd = newcrd;

	memset(rdm, 0, sizeof(struct cor_rd_msg));
	rdm->type = CRD_KTU_SUPPORTEDVERSIONS;
	list_add_tail(&(rdm->lh), &(cor_crd->rcv_msgs));

	atomic_set(&(newcrd->ready_to_read), 1);

	mutex_unlock(&cor_rds_lock);

	sock->state = SS_UNCONNECTED;
	sock->ops = &cor_rd_proto_ops;
	sock->sk = (struct sock *) cor_crd;

	return 0;
}
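
/*
 * Queues a CRD_KTU_CONNECT request for cs to the routing daemon.
 * Returns -EINPROGRESS on success; a failure is reported back later
 * by the daemon through the CRD_UTK_CONNECTERROR command.
 */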
int cor_rdreq_connect(struct cor_sock *cs)
{
	int rc;

	struct cor_rd_msg *rdm = kmem_cache_alloc(cor_rdmsg_slab, GFP_KERNEL);

	mutex_lock(&cor_rds_lock);
	mutex_lock(&(cs->lock));

	BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
	BUG_ON(cs->data.conn_managed.cookie == 0);

	if (unlikely(cor_crd == 0 || atomic_read(&(cor_crd->connected)) == 0 ||
			cor_crd->versioninited == 0)) {
		/* do not leak rdm if the daemon is not ready */
		if (rdm != 0)
			kmem_cache_free(cor_rdmsg_slab, rdm);
		rc = -ENETUNREACH;
		goto out;
	}

	if (unlikely(rdm == 0)) {
		rc = -ETIMEDOUT;
		goto out;
	}

	memset(rdm, 0, sizeof(struct cor_rd_msg));

	kref_get(&(cs->ref));
	list_add_tail(&(rdm->cs_lh), &(cs->data.conn_managed.rd_msgs));
	rdm->cs = cs;
	rdm->type = CRD_KTU_CONNECT;

	if (list_empty(&(cor_crd->rcv_msgs))) {
		atomic_set(&(cor_crd->ready_to_read), 1);
		barrier();
		cor_crd->sk.sk_data_ready(&(cor_crd->sk));
	}
	list_add_tail(&(rdm->lh), &(cor_crd->rcv_msgs));
	kref_get(&(cs->ref));

	kref_get(&(cs->ref));
	list_add_tail(&(cs->data.conn_managed.crd_lh), &(cor_crd->socks));
	cs->data.conn_managed.in_crd_list = 1;

	rc = -EINPROGRESS;

out:
	mutex_unlock(&(cs->lock));
	mutex_unlock(&cor_rds_lock);

	return rc;
}

void cor_usersock_release(struct cor_sock *cs)
{
	mutex_lock(&cor_rds_lock);
	mutex_lock(&(cs->lock));

	if (cs->type != CS_TYPE_CONN_MANAGED)
		goto out;

	while (list_empty(&(cs->data.conn_managed.rd_msgs)) == 0) {
		struct cor_rd_msg *rdm = container_of(
				cs->data.conn_managed.rd_msgs.next,
				struct cor_rd_msg, cs_lh);

		list_del(&(rdm->lh));
		BUG_ON(rdm->cs != cs);
		list_del(&(rdm->cs_lh));
		kref_put(&(cs->ref), cor_kreffree_bug);
		rdm->cs = 0;
		kmem_cache_free(cor_rdmsg_slab, rdm);
	}

	if (cs->data.conn_managed.in_crd_list != 0) {
		list_del(&(cs->data.conn_managed.crd_lh));
		cs->data.conn_managed.in_crd_list = 0;
		kref_put(&(cs->ref), cor_kreffree_bug);
	}

out:
	mutex_unlock(&(cs->lock));
	mutex_unlock(&cor_rds_lock);
}

int __init cor_rd_init1(void)
{
	cor_rdmsg_slab = kmem_cache_create("cor_rdmsg",
			sizeof(struct cor_rd_msg), 8, 0, 0);
	if (unlikely(cor_rdmsg_slab == 0))
		return -ENOMEM;

	return 0;
}

int __init cor_rd_init2(void)
{
	return proto_register(&cor_rd_proto, 1);
}

void __exit cor_rd_exit1(void)
{
	proto_unregister(&cor_rd_proto);
}

void __exit cor_rd_exit2(void)
{
	kmem_cache_destroy(cor_rdmsg_slab);
	cor_rdmsg_slab = 0;
}

MODULE_LICENSE("GPL");