/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include "cor.h"

#define MAX_SND_MSGLEN 4096
#define MAX_MSG_LEN 256
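
/*
 * Both directions use the same framing (see _cor_rd_sendmsg_hdr/_body and
 * the cor_fill_msgbuf_* helpers below): an 8 byte header of two __u32
 * fields, command and parameter length, followed by that many parameter
 * bytes. MAX_SND_MSGLEN bounds the parameters of userspace-to-kernel
 * commands; rcvbuf holds one kernel-to-userspace message of up to
 * MAX_MSG_LEN bytes plus its 8 byte header.
 */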

struct cor_rd_msg {
	struct list_head lh;

	struct list_head cs_lh;
	struct cor_sock *cs;

	__u32 type;
	union {
	} msg;
};
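
/*
 * State of the routing daemon socket. Only one such socket may exist at a
 * time; cor_crd below points to it and cor_create_rdaemon_sock() rejects
 * a second one with -EACCES.
 */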

struct cor_rdsock {
	struct sock sk;

	atomic_t connected;

	__u8 versioninited;

	struct list_head socks;

	struct mutex sndbuf_lock;
	char snd_cmdplen_buf[8];
	__u8 snd_cmdplen_read;
	__u32 param_read;
	char *cmdparams;

	atomic_t ready_to_read;
	struct list_head rcv_msgs; /* protected by rds_lock */

	struct mutex rcvbuf_lock;
	char rcvbuf[MAX_MSG_LEN + 8];
	__u32 rcvbuflen;
	__u32 rcvbufoffset;
};

static struct kmem_cache *cor_rdmsg_slab;

static DEFINE_MUTEX(cor_rds_lock);
static struct cor_rdsock *cor_crd = 0;
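
/*
 * Close of the routing daemon socket: drop all queued messages, fail every
 * still-pending managed connect with -ENETUNREACH and take the interface
 * configuration down.
 */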

int cor_rd_socket_release(struct socket *sock)
{
	mutex_lock(&cor_rds_lock);

	BUG_ON(((struct cor_rdsock *) sock->sk) != cor_crd);

	cor_config_down();

	cor_set_interface_config(0, 0, 0);

	while (list_empty(&(cor_crd->rcv_msgs)) == 0) {
		struct cor_rd_msg *rdm = container_of(cor_crd->rcv_msgs.next,
				struct cor_rd_msg, lh);

		list_del(&(rdm->lh));
		if (rdm->cs != 0) {
			list_del(&(rdm->cs_lh));
			kref_put(&(rdm->cs->ref), cor_free_sock);
			rdm->cs = 0;
		}
		kmem_cache_free(cor_rdmsg_slab, rdm);
	}

	while (list_empty(&(cor_crd->socks)) == 0) {
		struct cor_sock *cs = container_of(cor_crd->socks.next,
				struct cor_sock, data.conn_managed.crd_lh);

		BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
		BUG_ON(cs->data.conn_managed.in_crd_list == 0);
		list_del(&(cs->data.conn_managed.crd_lh));
		cs->data.conn_managed.in_crd_list = 0;
		_cor_set_sock_connecterror(cs, -ENETUNREACH);
		kref_put(&(cs->ref), cor_free_sock);
	}

	if (cor_crd->cmdparams != 0) {
		kfree(cor_crd->cmdparams);
		cor_crd->cmdparams = 0;
	}

	cor_crd = 0;

	mutex_unlock(&cor_rds_lock);

	sock_put(sock->sk);

	return 0;
}

int cor_rd_socket_bind(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len)
{
	return -EOPNOTSUPP;
}

int cor_rd_socket_connect(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len, int flags)
{
	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	atomic_set(&(crd->connected), 1);

	lock_sock(sock->sk);
	sock->state = SS_CONNECTED;
	release_sock(sock->sk);
	return 0;
}

int cor_rd_socket_accept(struct socket *sock, struct socket *newsock,
		int flags, bool kern)
{
	return -EOPNOTSUPP;
}

int cor_rd_socket_listen(struct socket *sock, int len)
{
	return -EOPNOTSUPP;
}

int cor_rd_socket_shutdown(struct socket *sock, int flags)
{
	return 0;
}

int cor_rd_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int cor_rd_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	return -ENOPROTOOPT;
}

int cor_rd_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen)
{
	return -ENOPROTOOPT;
}
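
/*
 * Userspace side of the handshake parsed below, as a minimal sketch (the
 * routing daemon itself is not part of this file; cor_put_u32() stands in
 * for whatever userspace helper matches the byte order of cor_parse_u32()):
 *
 *	char buf[12];
 *	cor_put_u32(buf, CRD_UTK_VERSION);	// cmd
 *	cor_put_u32(buf + 4, 4);		// paramlen
 *	cor_put_u32(buf + 8, 0);		// version 0
 *	write(fd, buf, sizeof(buf));		// may be split across writes
 *
 * cor_rd_parse_version() accepts this exactly once; any other version,
 * length or a repeated CRD_UTK_VERSION fails the parse and resets the
 * connection.
 */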

static int cor_rd_parse_version(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	int rc = 0;
	__u32 version;

	mutex_lock(&cor_rds_lock);

	BUG_ON(crd == 0);

	if (paramlen != 4)
		goto err;

	version = cor_parse_u32(param);
	if (version != 0)
		goto err;

	if (crd->versioninited != 0)
		goto err;

	crd->versioninited = 1;

	if (0) {
err:
		rc = 1;
	}
	mutex_unlock(&cor_rds_lock);

	return rc;
}
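
/*
 * Interface list inside a CRD_UTK_UP parameter block: a __u32 interface
 * count (at most 65536), then per interface a __u32 name length followed
 * by the name bytes.
 */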

/* interface_config_lock must be held */
static int _cor_rd_parse_up_interfaces(struct cor_rdsock *crd, char *param,
		__u32 paramlen, __u32 *offset)
{
	__u32 num_intf;
	__u32 i;
	struct cor_interface_config *newconfig = 0;

	if (unlikely(*offset + 4 > paramlen))
		return 1;

	num_intf = cor_parse_u32(param + *offset);
	*offset += 4;

	if (unlikely(num_intf > 65536))
		return 1;

	newconfig = kmalloc(num_intf * sizeof(struct cor_interface_config),
			GFP_KERNEL);
	if (unlikely(newconfig == 0))
		return 1;

	memset(newconfig, 0, num_intf * sizeof(struct cor_interface_config));

	for (i = 0; i < num_intf; i++) {
		struct cor_interface_config *newconfig_curr = &(newconfig[i]);

		if (unlikely(*offset + 4 > paramlen))
			goto out_err;

		newconfig_curr->name_len = cor_parse_u32(param + *offset);
		*offset += 4;

		if (unlikely(*offset + newconfig_curr->name_len > paramlen))
			goto out_err;

		newconfig_curr->name = kmalloc(newconfig_curr->name_len,
				GFP_KERNEL);
		if (unlikely(newconfig_curr->name == 0))
			goto out_err;

		memcpy(newconfig_curr->name, param + *offset,
				newconfig_curr->name_len);
		*offset += newconfig_curr->name_len;
	}

	cor_set_interface_config(newconfig, num_intf, 0);

	return 0;

out_err:
	while (i > 0) {
		struct cor_interface_config *newconfig_curr;

		i--;

		newconfig_curr = &(newconfig[i]);

		BUG_ON(newconfig_curr->name == 0);
		kfree(newconfig_curr->name);
		newconfig_curr->name = 0;
	}
	kfree(newconfig);
	return 1;
}
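
/*
 * CRD_UTK_UP parameter layout: __u64 flags, __u32 addrlen, addrlen bytes
 * of local address (at most 64), then, if CRD_UTK_UP_FLAGS_INTERFACES is
 * set, the interface list parsed above.
 */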

static int cor_rd_parse_up(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	__u32 offset = 0;

	__u64 flags;
	__u32 addrlen;
	char *addr = 0;

	if (unlikely(paramlen < 12))
		return 1;

	flags = cor_parse_u64(param);
	offset += 8;

	addrlen = cor_parse_u32(param + offset);
	offset += 4;

	if (addrlen > 0) {
		if (unlikely(unlikely(addrlen > 64) ||
				unlikely(offset + addrlen > paramlen)))
			return 1;
		addr = param + offset;
		offset += addrlen;
	}

	if ((flags & CRD_UTK_UP_FLAGS_INTERFACES) != 0) {
		if (_cor_rd_parse_up_interfaces(crd, param, paramlen, &offset)
				!= 0) {
			return 1;
		}
	} else {
		cor_set_interface_config(0, 0, 1);
	}

	if (cor_config_up(addr, addrlen) != 0)
		return 1;

	return 0;
}
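
/*
 * CRD_UTK_CONNECTERROR parameter layout: __be64 cookie identifying the
 * managed socket, then a __u32 error code that is mapped to an errno
 * below (unknown codes fall back to -ENETUNREACH).
 */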

static int cor_rd_parse_connecterror(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	__be64 cookie;
	__u32 error;
	int errorno;

	if (unlikely(paramlen < 12))
		return 1;

	cookie = cor_parse_be64(param);
	error = cor_parse_u32(param + 8);

	if (error == CRD_UTK_CONNECTERROR_ACCES) {
		errorno = -EACCES;
	} else if (error == CRD_UTK_CONNECTERROR_NETUNREACH) {
		errorno = -ENETUNREACH;
	} else if (error == CRD_UTK_CONNECTERROR_TIMEDOUT) {
		errorno = -ETIMEDOUT;
	} else if (error == CRD_UTK_CONNECTERROR_REFUSED) {
		errorno = -ECONNREFUSED;
	} else {
		errorno = -ENETUNREACH;
	}

	cor_set_sock_connecterror(cookie, errorno);

	return 0;
}

static int cor_rd_parse(struct cor_rdsock *crd, __u32 cmd, char *param,
		__u32 paramlen)
{
	if (unlikely(unlikely(cmd != CRD_UTK_VERSION) &&
			unlikely(crd->versioninited == 0)))
		return 1;

	if (cmd == CRD_UTK_VERSION) {
		return cor_rd_parse_version(crd, cmd, param, paramlen);
	} else if (cmd == CRD_UTK_UP) {
		return cor_rd_parse_up(crd, cmd, param, paramlen);
	} else if (cmd == CRD_UTK_CONNECTERROR) {
		return cor_rd_parse_connecterror(crd, cmd, param, paramlen);
	} else {
		return 1;
	}
}
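
/*
 * The sendmsg path below is a small state machine: snd_cmdplen_read counts
 * received header bytes, param_read received parameter bytes. Userspace may
 * split a message across any number of write() calls; once the parameter
 * block is complete it is handed to cor_rd_parse() and the state is reset
 * for the next message.
 */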

static int _cor_rd_sendmsg_hdr(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	__u32 cpy;
	size_t st_rc;

	BUG_ON(len == 0);
	BUG_ON(len > (1024 * 1024 * 1024));

	BUG_ON(crd->snd_cmdplen_read > 8);
	cpy = (8 - crd->snd_cmdplen_read);
	if (unlikely(cpy > len))
		cpy = len;

	st_rc = copy_from_iter(crd->snd_cmdplen_buf +
			crd->snd_cmdplen_read, cpy, &(msg->msg_iter));

	if (unlikely(st_rc != cpy))
		return -EFAULT;

	crd->snd_cmdplen_read += cpy;

	return cpy;
}

static int _cor_rd_sendmsg_body(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	__u32 cpy = 0;

	__u32 cmd;
	__u32 paramlen;

	BUG_ON(len == 0);
	BUG_ON(len > (1024 * 1024 * 1024));

	BUG_ON(crd->snd_cmdplen_read != 8);

	cmd = cor_parse_u32(crd->snd_cmdplen_buf);
	paramlen = cor_parse_u32(crd->snd_cmdplen_buf + 4);

	if (crd->cmdparams == 0 && paramlen != 0) {
		BUG_ON(crd->param_read != 0);
		if (unlikely(paramlen > MAX_SND_MSGLEN))
			return -ECONNRESET;

		crd->cmdparams = kmalloc(paramlen, GFP_KERNEL);
		if (unlikely(crd->cmdparams == 0))
			return -ENOMEM;
	}

	if (crd->param_read < paramlen) {
		size_t st_rc;

		cpy = (paramlen - crd->param_read);
		if (cpy > len)
			cpy = len;

		BUG_ON(crd->cmdparams == 0);

		st_rc = copy_from_iter(crd->cmdparams +
				crd->param_read, cpy, &(msg->msg_iter));

		if (unlikely(st_rc != cpy))
			return -EFAULT;

		crd->param_read += cpy;
	}

	BUG_ON(crd->param_read > paramlen);

	if (crd->param_read == paramlen) {
		int rc = cor_rd_parse(crd, cmd, crd->cmdparams, paramlen);

		if (unlikely(rc != 0))
			return -ECONNRESET;

		memset(crd->snd_cmdplen_buf, 0,
				sizeof(crd->snd_cmdplen_buf));
		crd->snd_cmdplen_read = 0;
		crd->param_read = 0;
		kfree(crd->cmdparams);
		crd->cmdparams = 0;
	}

	return cpy;
}

static int _cor_rd_sendmsg(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	if (crd->snd_cmdplen_read < 8) {
		return _cor_rd_sendmsg_hdr(crd, msg, len);
	} else {
		return _cor_rd_sendmsg_body(crd, msg, len);
	}
}

int cor_rd_sendmsg(struct socket *sock, struct msghdr *msg, size_t total_len)
{
	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	int rc = 0;
	__u32 totalread = 0;
	__u32 currread = 0;

	__u32 len;

	if (unlikely(total_len > 1024 * 1024 * 1024))
		len = 1024 * 1024 * 1024;
	else
		len = (__u32) total_len;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return -ENOTCONN;

	mutex_lock(&(crd->sndbuf_lock));

	while (currread < len) {
		rc = _cor_rd_sendmsg(crd, msg, len - currread);
		if (unlikely(rc < 0))
			goto out;
		currread += rc;
		totalread += rc;
	}

out:
	mutex_unlock(&(crd->sndbuf_lock));

	if (rc >= 0 && totalread != 0) {
		BUG_ON(totalread > (1024 * 1024 * 1024));
		rc = totalread;
	}

	return rc;
}
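
/*
 * Kernel-to-userspace direction: queued cor_rd_msgs are serialized into
 * rcvbuf one at a time by the helpers below and copied out by
 * cor_rd_recvmsg().
 */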

static void cor_fill_msgbuf_supportedversions(struct cor_rdsock *crd,
		struct cor_rd_msg *rdm)
{
	BUG_ON(rdm->cs != 0);

	BUG_ON(MAX_MSG_LEN < 16);

	cor_put_u32(crd->rcvbuf, CRD_KTU_SUPPORTEDVERSIONS);
	cor_put_u32(crd->rcvbuf + 4, 8); /* len */
	cor_put_u32(crd->rcvbuf + 8, 0);
	cor_put_u32(crd->rcvbuf + 12, 0);

	crd->rcvbuflen = 16;
}
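
/*
 * CRD_KTU_CONNECT layout produced below: 8 byte header, __be64 cookie,
 * the 68 byte struct cor_sockaddr and a __u32 TOS value, 88 bytes in
 * total.
 */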

static void cor_fill_msgbuf_connect(struct cor_rdsock *crd,
		struct cor_rd_msg *rdm)
{
	char *remoteaddr;
	__u32 remoteaddr_len;

	BUG_ON(rdm->cs == 0);
	mutex_lock(&(rdm->cs->lock));
	BUG_ON(rdm->cs->type != CS_TYPE_CONN_MANAGED);

	remoteaddr = (char *) &(rdm->cs->data.conn_managed.remoteaddr);
	remoteaddr_len = sizeof(struct cor_sockaddr);
	BUG_ON(remoteaddr_len != 68);

	BUG_ON(MAX_MSG_LEN < (20 + remoteaddr_len));

	cor_put_u32(crd->rcvbuf, CRD_KTU_CONNECT);
	cor_put_u32(crd->rcvbuf + 4, 12 + remoteaddr_len);
	cor_put_be64(crd->rcvbuf + 8, rdm->cs->data.conn_managed.cookie);
	memcpy(crd->rcvbuf + 16, remoteaddr, remoteaddr_len);
	cor_put_u32(crd->rcvbuf + 16 + remoteaddr_len, rdm->cs->is_highlatency ?
			COR_TOS_HIGH_LATENCY : COR_TOS_LOW_LATENCY);

	crd->rcvbuflen = 20 + remoteaddr_len;
	mutex_unlock(&(rdm->cs->lock));
}

static void _cor_fill_msgbuf(struct cor_rdsock *crd, struct cor_rd_msg *rdm)
{
	if (rdm->type == CRD_KTU_SUPPORTEDVERSIONS) {
		cor_fill_msgbuf_supportedversions(crd, rdm);
	} else if (rdm->type == CRD_KTU_CONNECT) {
		cor_fill_msgbuf_connect(crd, rdm);
	} else {
		BUG();
	}
}

static int cor_fill_msgbuf(struct socket *sock, struct cor_rdsock *crd,
		int blocking)
{
	int rc = 0;
	struct cor_rd_msg *rdm = 0;

	while (1) {
		mutex_lock(&cor_rds_lock);
		if (list_empty(&(crd->rcv_msgs)) == 0)
			break;
		atomic_set(&(crd->ready_to_read), 0);
		mutex_unlock(&cor_rds_lock);

		if (blocking == 0)
			return -EAGAIN;

		if (wait_event_interruptible(*sk_sleep(sock->sk),
				atomic_read(&(crd->ready_to_read)) != 0) != 0)
			return -ERESTARTSYS;
	}

	rdm = container_of(crd->rcv_msgs.next, struct cor_rd_msg, lh);
	list_del(&(rdm->lh));

	if (rdm->cs != 0)
		list_del(&(rdm->cs_lh));

	mutex_unlock(&cor_rds_lock);

	memset(crd->rcvbuf, 0, sizeof(crd->rcvbuf));
	crd->rcvbuflen = 0;
	crd->rcvbufoffset = 0;

	_cor_fill_msgbuf(crd, rdm);

	if (rdm->cs != 0) {
		kref_put(&(rdm->cs->ref), cor_free_sock);
		rdm->cs = 0;
	}

	kmem_cache_free(cor_rdmsg_slab, rdm);

	return rc;
}

int cor_rd_recvmsg(struct socket *sock, struct msghdr *msg, size_t total_len,
		int flags)
{
	int copied = 0;
	int blocking = (flags & MSG_DONTWAIT) == 0;

	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	__u32 totallen;

	if (unlikely(total_len > 1024 * 1024 * 1024))
		totallen = 1024 * 1024 * 1024;
	else
		totallen = (__u32) total_len;

	if (unlikely((flags & MSG_PEEK) != 0))
		return -EINVAL;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return -ENOTCONN;

	mutex_lock(&(crd->rcvbuf_lock));
	while (copied < totallen) {
		__u32 len = totallen - copied;
		size_t st_rc;

		if (crd->rcvbufoffset == crd->rcvbuflen) {
			int rc = cor_fill_msgbuf(sock, crd,
					blocking && copied == 0);

			if (rc != 0 && copied == 0)
				copied = rc;
			if (rc != 0)
				break;
		}

		BUG_ON(crd->rcvbufoffset > crd->rcvbuflen);

		if (len > (crd->rcvbuflen - crd->rcvbufoffset))
			len = crd->rcvbuflen - crd->rcvbufoffset;

		st_rc = copy_to_iter(crd->rcvbuf + crd->rcvbufoffset, len,
				&(msg->msg_iter));

		if (unlikely(st_rc != len)) {
			copied = -EFAULT;
			break;
		}

		copied += len;
		crd->rcvbufoffset += len;
	}
	mutex_unlock(&(crd->rcvbuf_lock));

	BUG_ON(copied > 0 && unlikely((copied > total_len ||
			copied > totallen)));

	return copied;
}
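
/*
 * poll: POLLOUT is always set (sendmsg processes commands synchronously,
 * there is no send buffer to fill up); the socket is readable whenever
 * rcvbuf still holds unread bytes or further messages are queued.
 */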

static unsigned int cor_rd_poll(struct file *file, struct socket *sock,
		poll_table *wait)
{
	unsigned int mask = 0;

	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return 0;

	sock_poll_wait(file, sock, wait);

	mutex_lock(&(crd->rcvbuf_lock));
	mutex_lock(&cor_rds_lock);

	if (crd->rcvbufoffset != crd->rcvbuflen ||
			(list_empty(&(crd->rcv_msgs)) == 0))
		mask |= (POLLIN | POLLRDNORM);

	mutex_unlock(&cor_rds_lock);
	mutex_unlock(&(crd->rcvbuf_lock));

	mask |= (POLLOUT | POLLWRNORM);

	return mask;
}

struct proto cor_rd_proto = {
	.name = "cor_rd",
	.obj_size = sizeof(struct cor_rdsock),
	.owner = THIS_MODULE,
};

const struct proto_ops cor_rd_proto_ops = {
	.family = PF_COR,
	.owner = THIS_MODULE,
	.release = cor_rd_socket_release,
	.bind = cor_rd_socket_bind,
	.connect = cor_rd_socket_connect,
	.accept = cor_rd_socket_accept,
	.listen = cor_rd_socket_listen,
	.shutdown = cor_rd_socket_shutdown,
	.ioctl = cor_rd_ioctl,
	.setsockopt = cor_rd_setsockopt,
	.getsockopt = cor_rd_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cor_rd_ioctl,
	.compat_setsockopt = cor_rd_setsockopt,
	.compat_getsockopt = cor_rd_getsockopt,
#endif
	.sendmsg = cor_rd_sendmsg,
	.recvmsg = cor_rd_recvmsg,
	.poll = cor_rd_poll,

	.socketpair = cor_socket_socketpair,
	.getname = cor_socket_getname,
	.mmap = cor_socket_mmap,

	/* sendpage, splice_read are optional */
};

int cor_create_rdaemon_sock(struct net *net, struct socket *sock, int protocol,
		int kern)
{
	struct cor_rd_msg *rdm = 0;
	struct cor_rdsock *newcrd = 0;

	rdm = kmem_cache_alloc(cor_rdmsg_slab, GFP_KERNEL);
	if (unlikely(rdm == 0))
		return -ENOMEM;

	newcrd = (struct cor_rdsock *) sk_alloc(net, PF_COR, GFP_KERNEL,
			&cor_rd_proto, kern);
	if (unlikely(newcrd == 0)) {
		kmem_cache_free(cor_rdmsg_slab, rdm);
		return -ENOMEM;
	}

	sock_init_data(sock, (struct sock *) newcrd);
	newcrd->sk.sk_protocol = protocol;
	memset(((char *) newcrd) + sizeof(struct sock), 0,
			sizeof(struct cor_rdsock) - sizeof(struct sock));

	atomic_set(&(newcrd->connected), 0);
	INIT_LIST_HEAD(&(newcrd->socks));
	mutex_init(&(newcrd->sndbuf_lock));
	mutex_init(&(newcrd->rcvbuf_lock));
	atomic_set(&(newcrd->ready_to_read), 0);
	INIT_LIST_HEAD(&(newcrd->rcv_msgs));

	mutex_lock(&cor_rds_lock);
	if (cor_crd != 0) {
		sock_put((struct sock *) newcrd);
		mutex_unlock(&cor_rds_lock);
		kmem_cache_free(cor_rdmsg_slab, rdm);
		return -EACCES;
	}
	cor_crd = newcrd;

	memset(rdm, 0, sizeof(struct cor_rd_msg));
	rdm->type = CRD_KTU_SUPPORTEDVERSIONS;
	list_add_tail(&(rdm->lh), &(cor_crd->rcv_msgs));

	atomic_set(&(newcrd->ready_to_read), 1);

	mutex_unlock(&cor_rds_lock);

	sock->state = SS_UNCONNECTED;
	sock->ops = &cor_rd_proto_ops;
	sock->sk = (struct sock *) cor_crd;

	return 0;
}
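
/*
 * Entry point for managed sockets wanting to connect: queue a
 * CRD_KTU_CONNECT message for the routing daemon and wake it up. The
 * connect completes asynchronously, hence -EINPROGRESS on success.
 */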

int cor_rdreq_connect(struct cor_sock *cs)
{
	int rc;

	struct cor_rd_msg *rdm = kmem_cache_alloc(cor_rdmsg_slab, GFP_KERNEL);

	mutex_lock(&cor_rds_lock);
	mutex_lock(&(cs->lock));

	BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
	BUG_ON(cs->data.conn_managed.cookie == 0);

	if (unlikely(cor_crd == 0 || atomic_read(&(cor_crd->connected)) == 0 ||
			cor_crd->versioninited == 0)) {
		if (rdm != 0)
			kmem_cache_free(cor_rdmsg_slab, rdm);
		rc = -ENETUNREACH;
		goto out;
	}

	if (unlikely(rdm == 0)) {
		rc = -ETIMEDOUT;
		goto out;
	}

	memset(rdm, 0, sizeof(struct cor_rd_msg));

	kref_get(&(cs->ref));
	list_add_tail(&(rdm->cs_lh), &(cs->data.conn_managed.rd_msgs));
	rdm->cs = cs;
	rdm->type = CRD_KTU_CONNECT;

	if (list_empty(&(cor_crd->rcv_msgs))) {
		atomic_set(&(cor_crd->ready_to_read), 1);
		barrier();
		cor_crd->sk.sk_data_ready(&(cor_crd->sk));
	}
	list_add_tail(&(rdm->lh), &(cor_crd->rcv_msgs));
	kref_get(&(cs->ref));

	kref_get(&(cs->ref));
	list_add_tail(&(cs->data.conn_managed.crd_lh), &(cor_crd->socks));
	cs->data.conn_managed.in_crd_list = 1;

	rc = -EINPROGRESS;

out:
	mutex_unlock(&(cs->lock));
	mutex_unlock(&cor_rds_lock);

	return rc;
}
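
/*
 * Detach a managed socket from the routing daemon: unlink any messages
 * still queued for it and drop the references taken in
 * cor_rdreq_connect().
 */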

void cor_usersock_release(struct cor_sock *cs)
{
	mutex_lock(&cor_rds_lock);
	mutex_lock(&(cs->lock));

	if (cs->type != CS_TYPE_CONN_MANAGED)
		goto out;

	while (list_empty(&(cs->data.conn_managed.rd_msgs)) == 0) {
		struct cor_rd_msg *rdm = container_of(
				cs->data.conn_managed.rd_msgs.next,
				struct cor_rd_msg, cs_lh);

		list_del(&(rdm->lh));
		BUG_ON(rdm->cs != cs);
		list_del(&(rdm->cs_lh));
		kref_put(&(cs->ref), cor_kreffree_bug);
		rdm->cs = 0;
		kmem_cache_free(cor_rdmsg_slab, rdm);
	}

	if (cs->data.conn_managed.in_crd_list != 0) {
		list_del(&(cs->data.conn_managed.crd_lh));
		cs->data.conn_managed.in_crd_list = 0;
		kref_put(&(cs->ref), cor_kreffree_bug);
	}

out:
	mutex_unlock(&(cs->lock));
	mutex_unlock(&cor_rds_lock);
}

int __init cor_rd_init1(void)
{
	cor_rdmsg_slab = kmem_cache_create("cor_rdmsg",
			sizeof(struct cor_rd_msg), 8, 0, 0);
	if (unlikely(cor_rdmsg_slab == 0))
		return -ENOMEM;

	return 0;
}

int __init cor_rd_init2(void)
{
	return proto_register(&cor_rd_proto, 1);
}

void __exit cor_rd_exit1(void)
{
	proto_unregister(&cor_rd_proto);
}

void __exit cor_rd_exit2(void)
{
	kmem_cache_destroy(cor_rdmsg_slab);
	cor_rdmsg_slab = 0;
}

MODULE_LICENSE("GPL");