net/cor/sock_managed.c
/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <net/sock.h>
#include <linux/net.h>
#include <linux/uaccess.h>

#include <linux/crc32c.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

#include "cor.h"
static DEFINE_SPINLOCK(cor_cookie_gen);

static DEFINE_SPINLOCK(cor_sock_cookie_lock);
static struct rb_root cor_sock_cookie_rb;

#warning todo which lock protects sk_err, sk_rcvtimeo and sk_sndtimeo ???
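
/*
 * Look up the conn-managed socket registered under @cookie in
 * cor_sock_cookie_rb. On a hit a reference is taken which the caller
 * must drop with kref_put(&cs->ref, cor_free_sock).
 */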
struct cor_sock *cor_get_sock_by_cookie(__be64 cookie)
{
	struct rb_node *n = 0;
	struct cor_sock *ret = 0;

	spin_lock_bh(&cor_sock_cookie_lock);

	n = cor_sock_cookie_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct cor_sock *cs = container_of(n, struct cor_sock,
				data.conn_managed.rbn);

		BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
		BUG_ON(cs->data.conn_managed.cookie == 0);

		if (cookie < cs->data.conn_managed.cookie)
			n = n->rb_left;
		else if (cookie > cs->data.conn_managed.cookie)
			n = n->rb_right;
		else
			ret = cs;
	}

	if (ret != 0)
		kref_get(&ret->ref);

	spin_unlock_bh(&cor_sock_cookie_lock);

	return ret;
}
static void cor_insert_sock_cookie(struct cor_sock *ins_l)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	__u64 cookie = ins_l->data.conn_managed.cookie;

	BUG_ON(ins_l->type != CS_TYPE_CONN_MANAGED);
	BUG_ON(ins_l->data.conn_managed.cookie == 0);

	spin_lock_bh(&cor_sock_cookie_lock);

	root = &cor_sock_cookie_rb;
	p = &root->rb_node;

	while ((*p) != 0) {
		struct cor_sock *curr = container_of(*p,
				struct cor_sock, data.conn_managed.rbn);

		BUG_ON(curr->type != CS_TYPE_CONN_MANAGED);
		BUG_ON(curr->data.conn_managed.cookie == 0);

		parent = *p;
		if (unlikely(cookie == curr->data.conn_managed.cookie)) {
			BUG();
		} else if (cookie < curr->data.conn_managed.cookie) {
			p = &(*p)->rb_left;
		} else if (cookie > curr->data.conn_managed.cookie) {
			p = &(*p)->rb_right;
		}
	}

	kref_get(&ins_l->ref);
	rb_link_node(&ins_l->data.conn_managed.rbn, parent, p);
	rb_insert_color(&ins_l->data.conn_managed.rbn, root);

	spin_unlock_bh(&cor_sock_cookie_lock);
}
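
/*
 * Pick a random, non-zero, currently unused cookie for cs_m_l and insert
 * the socket into the cookie rbtree. Up to 16 attempts are made; returns
 * 0 on success and 1 if no free cookie was found.
 */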
static int cor_alloc_corsock_cookie(struct cor_sock *cs_m_l)
{
	__be64 cookie;
	int i;

	BUG_ON(cs_m_l->type != CS_TYPE_CONN_MANAGED);
	BUG_ON(cs_m_l->data.conn_managed.cookie != 0);

	spin_lock_bh(&cor_cookie_gen);
	for (i = 0; i < 16; i++) {
		struct cor_sock *cs2 = 0;

		cookie = 0;
		get_random_bytes((char *) &cookie, sizeof(cookie));

		if (unlikely(cookie == 0))
			continue;

		cs2 = cor_get_sock_by_cookie(cookie);
		if (unlikely(cs2 != 0)) {
			kref_put(&cs2->ref, cor_free_sock);
			continue;
		}

		goto found;
	}
	spin_unlock_bh(&cor_cookie_gen);
	return 1;

found:
	cs_m_l->data.conn_managed.cookie = cookie;
	cor_insert_sock_cookie(cs_m_l);
	spin_unlock_bh(&cor_cookie_gen);
	return 0;
}
static void _cor_mngdsocket_shutdown(struct cor_sock *cs_m_l, int flags);

static int cor_mngdsocket_closefinished(struct cor_sock *cs_m)
{
	int rc = 0;

	mutex_lock(&cs_m->lock);
	BUG_ON(cs_m->type != CS_TYPE_CONN_MANAGED);

	if (cs_m->data.conn_managed.rcvd_eof != 0 &&
			cs_m->data.conn_managed.rcvd_rcvend != 0) {
		rc = 1;
	} else if (cs_m->data.conn_managed.src_sock == 0 ||
			cs_m->data.conn_managed.trgt_sock == 0 ||
			cs_m->data.conn_managed.is_reset != 0) {
		rc = 1;
	} else {
		spin_lock_bh(&cs_m->data.conn_managed.src_sock->rcv_lock);
		if (cs_m->data.conn_managed.src_sock->isreset != 0)
			rc = 1;
		spin_unlock_bh(&cs_m->data.conn_managed.src_sock->rcv_lock);
	}

	mutex_unlock(&cs_m->lock);

	return rc;
}
static void cor_mngdsocket_release_closewait(struct cor_sock *cs_m,
		long timeout)
{
	while (cor_mngdsocket_closefinished(cs_m) == 0) {
		long waitret;

		if (atomic_read(&cs_m->ready_to_read) != 0)
			msleep(10);

		waitret = wait_event_interruptible_timeout(
				*sk_sleep(&cs_m->sk),
				atomic_read(&cs_m->ready_to_read) != 0,
				timeout);

		if (waitret <= 0)
			break;
	}
}
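
/*
 * Release path for a conn-managed socket: if SO_LINGER is set, shut both
 * directions down and wait until the connection is closed or reset, then
 * reset and drop the attached conns and remove the socket from the
 * cookie rbtree.
 */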
static void cor_mngdsocket_release_mngd(struct cor_sock *cs_m)
{
	long timeout = 0;

	mutex_lock(&cs_m->lock);
	BUG_ON(cs_m->type != CS_TYPE_CONN_MANAGED);

	if (sock_flag(&cs_m->sk, SOCK_LINGER) &&
			!(current->flags & PF_EXITING))
		timeout = cs_m->sk.sk_lingertime;

	if (timeout > 0) {
		_cor_mngdsocket_shutdown(cs_m, SHUT_RDWR);
		mutex_unlock(&cs_m->lock);

		cor_mngdsocket_release_closewait(cs_m, timeout);

		mutex_lock(&cs_m->lock);
		BUG_ON(cs_m->type != CS_TYPE_CONN_MANAGED);
	}

	cs_m->isreleased = 1;

	if (cs_m->data.conn_managed.src_sock != 0 &&
			cs_m->data.conn_managed.trgt_sock != 0) {
		cor_reset_conn(cs_m->data.conn_managed.src_sock);

		cor_conn_kref_put_bug(cs_m->data.conn_managed.src_sock,
				"socket");
		cor_conn_kref_put(cs_m->data.conn_managed.trgt_sock,
				"socket");

		cs_m->data.conn_managed.src_sock = 0;
		cs_m->data.conn_managed.trgt_sock = 0;
	}

	mutex_unlock(&cs_m->lock);

	cor_usersock_release(cs_m);

	mutex_lock(&cs_m->lock);
	BUG_ON(cs_m->type != CS_TYPE_CONN_MANAGED);
	if (cs_m->data.conn_managed.cookie != 0) {
		spin_lock_bh(&cor_sock_cookie_lock);
		rb_erase(&cs_m->data.conn_managed.rbn, &cor_sock_cookie_rb);
		kref_put(&cs_m->ref, cor_kreffree_bug);
		spin_unlock_bh(&cor_sock_cookie_lock);
		cs_m->data.conn_managed.cookie = 0;
	}
	mutex_unlock(&cs_m->lock);
}
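
/* proto_ops release for unconnected, listener and conn-managed sockets */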
int cor_mngdsocket_release(struct socket *sock)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;
	__u8 type;

	if (sock->sk == 0) {
		/* accept may return before newsock is initialised */
		return 0;
	}

	mutex_lock(&cs->lock);
	type = cs->type;
	if (type != CS_TYPE_CONN_MANAGED)
		cs->isreleased = 1;
	mutex_unlock(&cs->lock);

	if (type == CS_TYPE_UNCONNECTED) {
	} else if (type == CS_TYPE_LISTENER) {
		cor_close_port(cs);
	} else if (type == CS_TYPE_CONN_MANAGED) {
		cor_mngdsocket_release_mngd(cs);
	} else {
		BUG();
	}

	kref_put(&cs->ref, cor_free_sock);

	return 0;
}
int cor_mngdsocket_bind(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len)
{
	int rc = 0;
	struct cor_sock *cs = (struct cor_sock *) sock->sk;
	struct cor_sockaddr *addr = (struct cor_sockaddr *) saddr;

	if (unlikely(sockaddr_len < sizeof(struct cor_sockaddr)))
		return -EINVAL;

	if (unlikely(addr->sin_family != AF_COR))
		return -EINVAL;

	if (unlikely(be64_to_cpu(addr->addr) != 0))
		return -EINVAL;

	if (unlikely(be32_to_cpu(addr->port) == 0))
		return -EINVAL;

	mutex_lock(&cs->lock);
	if (unlikely(cs->type != CS_TYPE_UNCONNECTED))
		rc = -EINVAL;
	else
		rc = cor_open_port(cs, addr->port);
	mutex_unlock(&cs->lock);

	return rc;
}
static int cor_mngdsocket_init_conn_managed(struct cor_sock *cs_l,
		char *rcvbuf, char *sndbuf)
{
	BUG_ON(rcvbuf == 0);
	BUG_ON(sndbuf == 0);
	BUG_ON(cs_l->type != CS_TYPE_CONN_MANAGED);

	memset(&cs_l->data.conn_managed, 0, sizeof(cs_l->data.conn_managed));

	INIT_LIST_HEAD(&cs_l->data.conn_managed.rd_msgs);
	cs_l->data.conn_managed.rcv_buf = rcvbuf;
	cs_l->data.conn_managed.rcv_buf_state = RCV_BUF_STATE_INCOMPLETE;
	cs_l->data.conn_managed.snd_buf = sndbuf;
	cs_l->data.conn_managed.snd_segment_size = CONN_MNGD_MAX_SEGMENT_SIZE;

	return 0;
}
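
/*
 * proto_ops connect: allocate the send/receive segment buffers, assign a
 * cookie, hand the request to cor_rdreq_connect() and, unless O_NONBLOCK
 * is set, wait until the state leaves CS_CONNECTSTATE_CONNECTING.
 */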
static int cor_mngdsocket_connect(struct socket *sock,
		struct sockaddr *saddr, int sockaddr_len, int flags)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;
	struct cor_sockaddr *addr = (struct cor_sockaddr *) saddr;

	char *rcvbuf;
	char *sndbuf;

	int rc;

	if (unlikely(sockaddr_len < sizeof(struct cor_sockaddr)))
		return -EINVAL;

	if (unlikely(addr->sin_family != AF_COR))
		return -EINVAL;

	rcvbuf = kmalloc(CONN_MNGD_MAX_SEGMENT_SIZE, GFP_KERNEL);
	if (unlikely(rcvbuf == 0))
		return -ETIMEDOUT;

	sndbuf = kmalloc(CONN_MNGD_MAX_SEGMENT_SIZE, GFP_KERNEL);
	if (unlikely(sndbuf == 0)) {
		kfree(rcvbuf);
		return -ETIMEDOUT;
	}

	mutex_lock(&cs->lock);
	if (unlikely(cs->type != CS_TYPE_UNCONNECTED)) {
		mutex_unlock(&cs->lock);
		kfree(sndbuf);
		kfree(rcvbuf);
		return -EISCONN;
	}

	cs->type = CS_TYPE_CONN_MANAGED;
	rc = cor_mngdsocket_init_conn_managed(cs, rcvbuf, sndbuf);
	if (unlikely(rc != 0))
		goto err;

	rc = cor_alloc_corsock_cookie(cs);
	if (unlikely(rc != 0)) {
err:
		cs->type = CS_TYPE_UNCONNECTED;
		mutex_unlock(&cs->lock);
		kfree(sndbuf);
		kfree(rcvbuf);
		cs->data.conn_managed.rcv_buf = 0;
		cs->data.conn_managed.snd_buf = 0;
		return -ETIMEDOUT;
	}

	memcpy(&cs->data.conn_managed.remoteaddr, addr,
			sizeof(struct cor_sockaddr));

	cs->data.conn_managed.connect_state = CS_CONNECTSTATE_CONNECTING;

	mutex_unlock(&cs->lock);

	lock_sock(&cs->sk);
	sock->state = SS_CONNECTING;
	release_sock(&cs->sk);

	rc = cor_rdreq_connect(cs);

	if (unlikely(rc != -EINPROGRESS)) {
		mutex_lock(&cs->lock);
		cs->data.conn_managed.connect_state =
				CS_CONNECTSTATE_ERROR;
		mutex_unlock(&cs->lock);
		return rc;
	}

	if ((sock->file->f_flags & O_NONBLOCK) != 0)
		goto nonblock;

	while (1) {
		long waitret;

		mutex_lock(&cs->lock);
		if (cs->data.conn_managed.connect_state !=
				CS_CONNECTSTATE_CONNECTING) {
			mutex_unlock(&cs->lock);
			break;
		}

		atomic_set(&cs->ready_to_write, 0);

		mutex_unlock(&cs->lock);

		waitret = wait_event_interruptible_timeout(
				*sk_sleep(&cs->sk),
				atomic_read(&cs->ready_to_write) != 0,
				cs->sk.sk_sndtimeo);

		if (unlikely(waitret < 0))
			return sock_intr_errno(cs->sk.sk_sndtimeo);
		else if (unlikely(waitret == 0))
			return -ETIMEDOUT;
	}

nonblock:
	return sock_error(&cs->sk);
}
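
/*
 * proto_ops accept: take the next queued conn pair from the listener,
 * create a new socket with _cor_createsock(), attach the src/trgt conns
 * to it and arm the keepalive timer.
 */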
int cor_mngdsocket_accept(struct socket *sock, struct socket *newsock,
		int flags, bool kern)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	char *rcvbuf;
	char *sndbuf;
	struct cor_conn *src_sock_o;
	struct cor_conn *trgt_sock_o;
	int rc;
	struct cor_sock *newcs;

	rcvbuf = kmalloc(CONN_MNGD_MAX_SEGMENT_SIZE, GFP_KERNEL);
	if (unlikely(rcvbuf == 0))
		return -ENOMEM;

	sndbuf = kmalloc(CONN_MNGD_MAX_SEGMENT_SIZE, GFP_KERNEL);
	if (unlikely(sndbuf == 0)) {
		kfree(rcvbuf);
		return -ENOMEM;
	}

	mutex_lock(&cs->lock);

	BUG_ON(cs->type != CS_TYPE_UNCONNECTED &&
			cs->type != CS_TYPE_LISTENER &&
			cs->type != CS_TYPE_CONN_MANAGED);

	if (unlikely(cs->type != CS_TYPE_LISTENER)) {
		mutex_unlock(&cs->lock);
		kfree(sndbuf);
		kfree(rcvbuf);
		return -EINVAL;
	}

	spin_lock_bh(&cor_bindnodes);
	if (unlikely(cs->data.listener.queue_maxlen <= 0)) {
		spin_unlock_bh(&cor_bindnodes);
		mutex_unlock(&cs->lock);
		kfree(sndbuf);
		kfree(rcvbuf);
		return -EINVAL;
	}

	while (list_empty(&cs->data.listener.conn_queue)) {
		atomic_set(&cs->ready_to_accept, 0);
		spin_unlock_bh(&cor_bindnodes);
		mutex_unlock(&cs->lock);

		if ((flags & O_NONBLOCK) != 0) {
			/* nothing queued and we may not block:
			 * free the unused buffers */
			kfree(sndbuf);
			kfree(rcvbuf);
			return -EAGAIN;
		}

		if (wait_event_interruptible(*sk_sleep(&cs->sk),
				atomic_read(&cs->ready_to_accept) != 0) !=
				0) {
			kfree(sndbuf);
			kfree(rcvbuf);
			return -ERESTARTSYS;
		}
		mutex_lock(&cs->lock);
		spin_lock_bh(&cor_bindnodes);
	}

	src_sock_o = container_of(cs->data.listener.conn_queue.next,
			struct cor_conn, src.sock.cl_list);

	/* cor_reset_conn(src_sock_o); */ /* testing */

	BUG_ON(src_sock_o->src.sock.in_cl_list == 0);
	list_del(&src_sock_o->src.sock.cl_list);
	src_sock_o->src.sock.in_cl_list = 0;

	cs->data.listener.queue_len--;

	spin_unlock_bh(&cor_bindnodes);
	mutex_unlock(&cs->lock);

	spin_lock_bh(&src_sock_o->rcv_lock);
	trgt_sock_o = cor_get_conn_reversedir(src_sock_o);
	spin_unlock_bh(&src_sock_o->rcv_lock);

	/* kern = 0 - ugly, but af_unix does it too... */
	rc = _cor_createsock(sock_net(sock->sk), newsock,
			cs->sk.sk_protocol, 0, 0);

	if (unlikely(rc != 0)) {
		cor_reset_conn(src_sock_o);
		cor_conn_kref_put(src_sock_o, "conn_queue");
		kfree(sndbuf);
		kfree(rcvbuf);
		printk(KERN_ERR "cor: _cor_createsock() failed, connection reset\n");
		return rc;
	}

	newcs = (struct cor_sock *) newsock->sk;

	if (unlikely(newcs == 0)) {
		cor_reset_conn(src_sock_o);
		cor_conn_kref_put(src_sock_o, "conn_queue");
		kfree(sndbuf);
		kfree(rcvbuf);
		printk(KERN_ERR "cor: newsock->sk is null, connection reset\n");
		return -ENOMEM;
	}

	mutex_lock(&newcs->lock);
	spin_lock_bh(&trgt_sock_o->rcv_lock);
	spin_lock_bh(&src_sock_o->rcv_lock);

	BUG_ON(trgt_sock_o->is_client == 0);
	BUG_ON(src_sock_o->is_client != 0);

	BUG_ON(trgt_sock_o->targettype != TARGET_SOCK);
	BUG_ON(src_sock_o->sourcetype != SOURCE_SOCK);

	BUG_ON(newcs->type != CS_TYPE_UNCONNECTED);

	newcs->type = CS_TYPE_CONN_MANAGED;
	rc = cor_mngdsocket_init_conn_managed(newcs, rcvbuf, sndbuf);
	if (unlikely(rc != 0)) {
		cor_reset_conn(src_sock_o);
		cor_conn_kref_put(src_sock_o, "conn_queue");
		kfree(sndbuf);
		kfree(rcvbuf);
		printk(KERN_ERR "cor: cor_mngdsocket_init_conn_managed() failed, connection reset\n");
		return -ENOMEM;
	}

	newcs->data.conn_managed.src_sock = src_sock_o;
	newcs->data.conn_managed.trgt_sock = trgt_sock_o;
	cor_conn_kref_get(src_sock_o, "socket");
	cor_conn_kref_get(trgt_sock_o, "socket");

	/* we will notice reset conns when we try to use them */
	if (likely(src_sock_o->isreset == 0)) {
		src_sock_o->src.sock.ed->cs = newcs;
		trgt_sock_o->trgt.sock.cs = newcs;
		kref_get(&newcs->ref);
		kref_get(&newcs->ref);

		BUG_ON(newcs->data.conn_managed.rcv_buf == 0);
		src_sock_o->src.sock.socktype = SOCKTYPE_MANAGED;
		trgt_sock_o->trgt.sock.socktype = SOCKTYPE_MANAGED;
		trgt_sock_o->trgt.sock.rcv_buf_state =
				RCV_BUF_STATE_INCOMPLETE;
		trgt_sock_o->trgt.sock.rcv_buf =
				newcs->data.conn_managed.rcv_buf;
		trgt_sock_o->trgt.sock.rcvd = 0;

		BUG_ON(src_sock_o->src.sock.keepalive_intransit != 0);
		src_sock_o->src.sock.ed->jiffies_keepalive_lastact =
				jiffies - KEEPALIVE_INTERVAL_SECS * HZ + HZ;
		cor_keepalive_req_sched_timer(src_sock_o);
	}

	newcs->data.conn_managed.connect_state = CS_CONNECTSTATE_CONNECTED;

	spin_unlock_bh(&src_sock_o->rcv_lock);
	spin_unlock_bh(&trgt_sock_o->rcv_lock);
	mutex_unlock(&newcs->lock);

	newsock->ops = sock->ops;
	newsock->sk = (struct sock *) newcs;
	newsock->state = SS_CONNECTED;

	cor_conn_kref_put(src_sock_o, "conn_queue");

	return 0;
}
int cor_mngdsocket_listen(struct socket *sock, int len)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	mutex_lock(&cs->lock);
	spin_lock_bh(&cor_bindnodes);

	BUG_ON(cs->type != CS_TYPE_UNCONNECTED &&
			cs->type != CS_TYPE_LISTENER &&
			cs->type != CS_TYPE_CONN_MANAGED);

	if (unlikely(cs->type != CS_TYPE_LISTENER)) {
		spin_unlock_bh(&cor_bindnodes);
		mutex_unlock(&cs->lock);
		return -EOPNOTSUPP;
	}

	if (len < 0)
		len = 0;

	cs->data.listener.queue_maxlen = len;

	spin_unlock_bh(&cor_bindnodes);
	mutex_unlock(&cs->lock);

	return 0;
}
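
/*
 * Mark the read and/or write side as shut down and queue an eof/rcvend
 * control message to the peer if one has not been sent yet. Called with
 * cs_m_l->lock held (as the _l suffix suggests).
 */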
static void _cor_mngdsocket_shutdown(struct cor_sock *cs_m_l, int flags)
{
	__u8 send_eof = 0;
	__u8 send_rcvend = 0;

	BUG_ON(cs_m_l->type != CS_TYPE_CONN_MANAGED);

	if (flags == SHUT_RD || flags == SHUT_RDWR) {
		if (cs_m_l->data.conn_managed.sent_rcvend == 0) {
			send_rcvend = 1;
			cs_m_l->data.conn_managed.sent_rcvend = 1;
		}

		cs_m_l->data.conn_managed.shutdown_rd = 1;
	}

	if (flags == SHUT_WR || flags == SHUT_RDWR) {
		if (cs_m_l->data.conn_managed.sent_eof == 0) {
			send_eof = 1;
			cs_m_l->data.conn_managed.sent_eof = 1;
		}
		cs_m_l->data.conn_managed.shutdown_wr = 1;

		cs_m_l->data.conn_managed.flush = 1;
	}

	if (send_eof != 0 || send_rcvend != 0)
		cor_mngdsocket_flushtoconn_ctrl(cs_m_l, send_eof, send_rcvend,
				0, 0);
}
int cor_mngdsocket_shutdown(struct socket *sock, int flags)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	mutex_lock(&cs->lock);

	BUG_ON(cs->type != CS_TYPE_UNCONNECTED &&
			cs->type != CS_TYPE_LISTENER &&
			cs->type != CS_TYPE_CONN_MANAGED);

	if (unlikely(cs->type == CS_TYPE_UNCONNECTED)) {
		mutex_unlock(&cs->lock);
		return -ENOTCONN;
	} else if (unlikely(cs->type != CS_TYPE_CONN_MANAGED)) {
		mutex_unlock(&cs->lock);
		return -EBADF;
	}

	_cor_mngdsocket_shutdown(cs, flags);

	mutex_unlock(&cs->lock);

	return 0;
}
int cor_mngdsocket_ioctl(struct socket *sock, unsigned int cmd,
		unsigned long arg)
{
	return -ENOIOCTLCMD;
}
static int cor_mngdsocket_setsockopt_publishservice(struct socket *sock,
		char __user *optval, unsigned int optlen)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;
	int publish;
	int notread;

	if (unlikely(optlen != 4))
		return -EINVAL;

	notread = copy_from_user(&publish, optval, 4);
	if (unlikely(notread != 0))
		return -EFAULT;

	if (publish != 0 && publish != 1)
		return -EINVAL;

	cor_set_publish_service(cs, (__u8) publish);

	return 0;
}
int cor_mngdsocket_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	if (unlikely(level != SOL_COR))
		return -ENOPROTOOPT;

	if (optname == COR_PUBLISH_SERVICE) {
		return cor_mngdsocket_setsockopt_publishservice(sock, optval,
				optlen);
	} else if (optname == COR_TOS) {
		return cor_socket_setsockopt_tos(sock, optval, optlen);
	} else if (optname == COR_PRIORITY) {
		return cor_socket_setsockopt_priority(sock, optval, optlen);
	} else {
		return -ENOPROTOOPT;
	}
}
int cor_mngdsocket_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen)
{
	return -ENOPROTOOPT;
}
void __cor_set_sock_connecterror(struct cor_sock *cs_m_l, int errorno)
{
	BUG_ON(cs_m_l->type != CS_TYPE_CONN_MANAGED);

	if (unlikely(unlikely(cs_m_l->isreleased != 0) ||
			unlikely(cs_m_l->data.conn_managed.connect_state !=
			CS_CONNECTSTATE_CONNECTING)))
		return;

	cs_m_l->data.conn_managed.connect_state = CS_CONNECTSTATE_ERROR;

	lock_sock(&cs_m_l->sk);
	xchg(&cs_m_l->sk.sk_err, errorno);
	release_sock(&cs_m_l->sk);

	atomic_set(&cs_m_l->ready_to_read, 1);
	atomic_set(&cs_m_l->ready_to_write, 1);
	atomic_set(&cs_m_l->ready_to_accept, 1);
	barrier();
	cs_m_l->sk.sk_state_change(&cs_m_l->sk);
}
void _cor_set_sock_connecterror(struct cor_sock *cs, int errorno)
{
	BUG_ON(errorno == 0);

	if (cs == 0)
		return;

	mutex_lock(&cs->lock);
	__cor_set_sock_connecterror(cs, errorno);
	mutex_unlock(&cs->lock);
}
void cor_mngdsocket_chksum(char *hdr, __u32 hdrlen,
		char *data, __u32 datalen,
		char *chksum, __u32 chksum_len)
{
	__u32 crc = 0;

	BUG_ON(chksum_len != 4);

	crc = crc32c(crc, hdr, hdrlen);
	crc = crc32c(crc, data, datalen);

	cor_put_u32(chksum, crc);
}
static int cor_mngdsocket_check_connected(struct cor_sock *cs_l)
{
	BUG_ON(cs_l->type != CS_TYPE_UNCONNECTED &&
			cs_l->type != CS_TYPE_LISTENER &&
			cs_l->type != CS_TYPE_CONN_MANAGED);
	if (unlikely(cs_l->type == CS_TYPE_UNCONNECTED)) {
		return -ENOTCONN;
	} else if (unlikely(cs_l->type != CS_TYPE_CONN_MANAGED)) {
		return -EBADF;
	} else if (unlikely(cs_l->data.conn_managed.connect_state !=
			CS_CONNECTSTATE_CONNECTED)) {
		return -ENOTCONN;
	}

	if (unlikely(cs_l->data.conn_managed.is_reset != 0))
		return -ECONNRESET;

	return 0;
}
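
/*
 * Choose the segment size for buffered sends: high-latency conns always
 * use 4096 bytes, otherwise the size scales down with the limited send
 * speed, minus @l4overhead bytes of layer-4 header.
 */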
static __u32 cor_get_segment_size(__u32 sndspeed_limited, __u8 is_highlatency,
		__u32 l4overhead)
{
	if (is_highlatency)
		return 4096;

	if (sndspeed_limited >= 4096 * 300)
		return 4096 - l4overhead;
	else if (sndspeed_limited >= 2048 * 200)
		return 2048 - l4overhead;
	else if (sndspeed_limited >= 1024 * 100)
		return 1024 - l4overhead;
	else if (sndspeed_limited >= 512 * 40)
		return 512 - l4overhead;
	else if (sndspeed_limited >= 256 * 10)
		return 256 - l4overhead;
	else
		return 128 - l4overhead;
}
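
/*
 * Copy as much of the message as fits into the current send segment
 * buffer. Returns the number of bytes copied or -EFAULT if the copy from
 * the iterator failed.
 */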
static int __cor_mngdsocket_sendmsg(struct msghdr *msg, __u32 totallen,
		struct cor_sock *cs_m_l, __u32 sndspeed_limited)
{
	__u32 len = totallen;
	__u16 bufleft;
	size_t st_rc;

	if (cs_m_l->data.conn_managed.snd_data_len == 0)
		cs_m_l->data.conn_managed.snd_segment_size =
				cor_get_segment_size(sndspeed_limited,
				cs_m_l->is_highlatency, 6);

	BUG_ON(cs_m_l->data.conn_managed.snd_segment_size >
			CONN_MNGD_MAX_SEGMENT_SIZE);

	BUG_ON(totallen > (1024 * 1024 * 1024));

	BUG_ON(cs_m_l->type != CS_TYPE_CONN_MANAGED);
	BUG_ON(cs_m_l->data.conn_managed.send_in_progress != 0);

	BUG_ON(cs_m_l->data.conn_managed.snd_segment_size <=
			cs_m_l->data.conn_managed.snd_data_len);

	bufleft = cs_m_l->data.conn_managed.snd_segment_size -
			cs_m_l->data.conn_managed.snd_data_len;

	if (len > bufleft)
		len = bufleft;
	BUG_ON(len >= 65536);

	st_rc = copy_from_iter(cs_m_l->data.conn_managed.snd_buf +
			cs_m_l->data.conn_managed.snd_data_len, len,
			&msg->msg_iter);

	if (unlikely(st_rc != len))
		return -EFAULT;

	cs_m_l->data.conn_managed.snd_data_len += (__u16) len;

	return len;
}
static int _cor_mngdsocket_sendmsg(struct msghdr *msg, __u32 totallen,
		__u32 *iovidx, __u32 *iovread, struct cor_sock *cs,
		__u8 flush)
{
	int rc;
	struct cor_conn *src_sock;
	__u32 sndspeed_limited;

	mutex_lock(&cs->lock);

	rc = cor_mngdsocket_check_connected(cs);
	if (unlikely(rc != 0))
		goto out;

	if (unlikely(cs->data.conn_managed.shutdown_wr != 0)) {
		rc = -ECONNRESET;
		goto out;
	}

	BUG_ON(cs->data.conn_managed.snd_data_len >
			cs->data.conn_managed.snd_segment_size);

	if (unlikely(cs->data.conn_managed.send_in_progress != 0 ||
			cs->data.conn_managed.snd_data_len ==
			cs->data.conn_managed.snd_segment_size)) {
		cs->data.conn_managed.flush = 0;
		cor_mngdsocket_flushtoconn_data(cs);
		if (cs->data.conn_managed.send_in_progress != 0 ||
				cs->data.conn_managed.snd_data_len ==
				cs->data.conn_managed.snd_segment_size) {
			rc = -EAGAIN;
			goto out;
		}
	}

	src_sock = cs->data.conn_managed.src_sock;
	if (unlikely(src_sock == 0)) {
		rc = -ENOTCONN;
		goto out;
	}

	spin_lock_bh(&src_sock->rcv_lock);
	if (unlikely(src_sock->isreset != 0 ||
			cor_is_src_sock(src_sock, cs) == 0)) {
		rc = -ECONNRESET;
		spin_unlock_bh(&src_sock->rcv_lock);
		goto out;
	} else if (cor_sock_sndbufavailable(src_sock, 0) == 0) {
		rc = -EAGAIN;
		src_sock->flush = 0;
		atomic_set(&cs->ready_to_write, 0);
		spin_unlock_bh(&src_sock->rcv_lock);
		goto out;
	}

	sndspeed_limited = src_sock->src.sock.ed->snd_speed.speed_limited;

	spin_unlock_bh(&src_sock->rcv_lock);

	rc = __cor_mngdsocket_sendmsg(msg, totallen, cs, sndspeed_limited);

	cs->data.conn_managed.flush = flush;
	if (unlikely(likely(rc > 0) && unlikely(rc != totallen)))
		cs->data.conn_managed.flush = 0;

	if (flush != 0 || cs->data.conn_managed.snd_data_len ==
			cs->data.conn_managed.snd_segment_size) {
		cor_mngdsocket_flushtoconn_data(cs);
	}

out:
	mutex_unlock(&cs->lock);

	return rc;
}
int cor_mngdsocket_sendmsg(struct socket *sock, struct msghdr *msg,
		size_t total_len)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	__u8 flush = ((msg->msg_flags & MSG_MORE) == 0) ? 1 : 0;
	int blocking = (msg->msg_flags & MSG_DONTWAIT) == 0;

	int rc = 0;
	int copied = 0;
	__u32 max = (1024 * 1024 * 1024);
	__u32 totallen;

	__u32 iovidx = 0;
	__u32 iovread = 0;

	totallen = total_len;
	if (unlikely(totallen > max || total_len > max)) {
		totallen = max;
		flush = 0;
	}

	while (rc >= 0 && copied < totallen) {
		rc = _cor_mngdsocket_sendmsg(msg, totallen - copied, &iovidx,
				&iovread, cs, flush);

		if (rc == -EAGAIN && blocking) {
			long waitret;

			waitret = wait_event_interruptible_timeout(
					*sk_sleep(&cs->sk),
					atomic_read(&cs->ready_to_write) != 0,
					cs->sk.sk_sndtimeo);

			if (unlikely(waitret < 0))
				rc = sock_intr_errno(cs->sk.sk_sndtimeo);
			else if (unlikely(waitret == 0))
				rc = -ETIMEDOUT;
			else
				continue;
		}

		if (rc > 0 || copied == 0)
			copied += rc;
		if (unlikely(rc == -EFAULT))
			copied = rc;

		BUG_ON(copied > 0 && ((__u32) copied > totallen));
	}

	BUG_ON(copied > 0 && unlikely((copied > total_len ||
			copied > totallen)));

	return copied;
}
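
/*
 * Interpret a completely received segment: either publish its payload to
 * the socket receive buffer or process the eof/rcvend/keepalive control
 * flags and rearm the conn for the next segment. Called with
 * trgt_sock_l->rcv_lock held.
 */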
static void __cor_mngdsocket_readfromconn(struct cor_sock *cs_m_l,
		struct cor_conn *trgt_sock_l,
		__u8 *send_eof, __u8 *send_rcvend,
		__u8 *keepalive_req_rcvd,
		__be32 *keepalive_req_cookie,
		__u8 *keepalive_resp_rcvd,
		__be32 *keepalive_resp_cookie)
{
	__u16 rcvbuf_consumed = 0;

	BUG_ON(trgt_sock_l->trgt.sock.rcv_buf_state != RCV_BUF_STATE_OK);

	if (likely((trgt_sock_l->trgt.sock.rcv_hdr_flags &
			CONN_MNGD_HASDATA) != 0)) {
		if (unlikely(trgt_sock_l->trgt.sock.rcv_data_len == 0 ||
				cs_m_l->data.conn_managed.shutdown_rd != 0))
			cs_m_l->data.conn_managed.rcv_data_len = 0;
		else
			cs_m_l->data.conn_managed.rcv_data_len =
					trgt_sock_l->trgt.sock.rcv_data_len;
		cs_m_l->data.conn_managed.rcv_buf_state = RCV_BUF_STATE_OK;
		return;
	}

	if (unlikely((trgt_sock_l->trgt.sock.rcv_hdr_flags &
			CONN_MNGD_EOF) != 0)) {
		if (cs_m_l->data.conn_managed.sent_rcvend == 0) {
			*send_rcvend = 1;
			cs_m_l->data.conn_managed.sent_rcvend = 1;
		}

		cs_m_l->data.conn_managed.rcvd_eof = 1;
	}

	if (unlikely((trgt_sock_l->trgt.sock.rcv_hdr_flags &
			CONN_MNGD_RCVEND) != 0)) {
		if (cs_m_l->data.conn_managed.sent_eof == 0) {
			*send_eof = 1;
			cs_m_l->data.conn_managed.sent_eof = 1;
		}

		cs_m_l->data.conn_managed.rcvd_rcvend = 1;
	}

	if (unlikely((trgt_sock_l->trgt.sock.rcv_hdr_flags &
			CONN_MNGD_KEEPALIVE_REQ) != 0)) {
		BUG_ON(rcvbuf_consumed + 4 >
				trgt_sock_l->trgt.sock.rcv_data_len);

		*keepalive_req_cookie = cor_parse_be32(
				trgt_sock_l->trgt.sock.rcv_buf +
				rcvbuf_consumed);
		rcvbuf_consumed += 4;

		*keepalive_req_rcvd = 1;
	}

	if (unlikely((trgt_sock_l->trgt.sock.rcv_hdr_flags &
			CONN_MNGD_KEEPALIVE_RESP) != 0)) {
		BUG_ON(rcvbuf_consumed + 4 >
				trgt_sock_l->trgt.sock.rcv_data_len);

		*keepalive_resp_cookie = cor_parse_be32(
				trgt_sock_l->trgt.sock.rcv_buf +
				rcvbuf_consumed);
		rcvbuf_consumed += 4;

		*keepalive_resp_rcvd = 1;
	}

	BUG_ON(rcvbuf_consumed != trgt_sock_l->trgt.sock.rcv_data_len);

	trgt_sock_l->trgt.sock.rcv_buf_state = RCV_BUF_STATE_INCOMPLETE;
	trgt_sock_l->trgt.sock.rcvd = 0;
}
static void _cor_mngdsocket_readfromconn(struct cor_sock *cs_m_l)
{
	__u8 do_wake_sender = 0;
	int reset_needed = 0;
	__u8 send_eof = 0;
	__u8 send_rcvend = 0;
	__u8 keepalive_req_rcvd = 0;
	__be32 keepalive_req_cookie = 0;
	__u8 keepalive_resp_rcvd = 0;
	__be32 keepalive_resp_cookie = 0;

	struct cor_conn *trgt_sock = cs_m_l->data.conn_managed.trgt_sock;

	spin_lock_bh(&trgt_sock->rcv_lock);

	if (unlikely(cor_is_trgt_sock(trgt_sock, cs_m_l) == 0))
		goto reset;

	cs_m_l->is_highlatency = trgt_sock->is_highlatency;

	if (unlikely(trgt_sock->isreset != 0))
		goto reset;

	BUG_ON(trgt_sock->trgt.sock.socktype != SOCKTYPE_MANAGED);
	BUG_ON(trgt_sock->trgt.sock.rcv_buf == 0);
	BUG_ON(trgt_sock->trgt.sock.rcv_buf !=
			cs_m_l->data.conn_managed.rcv_buf);

	if (cs_m_l->data.conn_managed.rcv_buf_state == RCV_BUF_STATE_OK) {
		cs_m_l->data.conn_managed.rcv_data_len = 0;
		cs_m_l->data.conn_managed.rcvbuf_consumed = 0;
		cs_m_l->data.conn_managed.rcv_buf_state =
				RCV_BUF_STATE_INCOMPLETE;

		trgt_sock->trgt.sock.rcv_buf_state =
				RCV_BUF_STATE_INCOMPLETE;
		trgt_sock->trgt.sock.rcvd = 0;
	}

	while (cs_m_l->data.conn_managed.rcv_buf_state ==
			RCV_BUF_STATE_INCOMPLETE) {
		cor_flush_sock_managed(trgt_sock, 1, &do_wake_sender);

		if (trgt_sock->trgt.sock.rcv_buf_state ==
				RCV_BUF_STATE_INCOMPLETE) {
			break;
		} else if (unlikely(trgt_sock->trgt.sock.rcv_buf_state ==
				RCV_BUF_STATE_RESET)) {
			goto reset;
		} else {
			BUG_ON(trgt_sock->trgt.sock.rcv_buf_state !=
					RCV_BUF_STATE_OK);

			__cor_mngdsocket_readfromconn(cs_m_l, trgt_sock,
					&send_eof, &send_rcvend,
					&keepalive_req_rcvd,
					&keepalive_req_cookie,
					&keepalive_resp_rcvd,
					&keepalive_resp_cookie);
		}
	}

	if (unlikely(cs_m_l->data.conn_managed.rcvd_eof != 0 &&
			cs_m_l->data.conn_managed.rcvd_rcvend != 0 &&
			trgt_sock->is_client == 0)) {
reset:
		reset_needed = 1;
	}

	spin_unlock_bh(&trgt_sock->rcv_lock);

	if (unlikely(reset_needed)) {
		cor_reset_conn(trgt_sock);

		cor_conn_kref_put_bug(cs_m_l->data.conn_managed.src_sock,
				"socket");
		cor_conn_kref_put(cs_m_l->data.conn_managed.trgt_sock,
				"socket");

		cs_m_l->data.conn_managed.src_sock = 0;
		cs_m_l->data.conn_managed.trgt_sock = 0;

		cs_m_l->data.conn_managed.is_reset = 1;
		cor_sk_data_ready(cs_m_l);
		cor_sk_write_space(cs_m_l);

		return;
	} else if (do_wake_sender != 0) {
		cor_wake_sender(trgt_sock);
	}

	if (unlikely(send_eof != 0 || send_rcvend != 0 ||
			keepalive_req_rcvd != 0)) {
		cor_mngdsocket_flushtoconn_ctrl(cs_m_l, send_eof,
				send_rcvend, keepalive_req_rcvd,
				keepalive_req_cookie);
	}

	if (unlikely(keepalive_resp_rcvd != 0))
		cor_keepalive_resp_rcvd(cs_m_l, keepalive_resp_cookie);
}
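
/*
 * Return codes of cor_mngdsocket_readfromconn(): RC_RFC_OK means data is
 * available in the receive buffer, RC_RFC_INCOMPLETE means no complete
 * segment has arrived yet, RC_RFC_EOF and RC_RFC_RESET report end of
 * stream and a reset/shut-down socket respectively.
 */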
#define RC_RFC_OK 0
#define RC_RFC_INCOMPLETE 1
#define RC_RFC_EOF 2
#define RC_RFC_RESET 3
int cor_mngdsocket_readfromconn(struct cor_sock *cs_m_l)
{
	BUG_ON(cs_m_l->type != CS_TYPE_CONN_MANAGED);

	if (unlikely(cs_m_l->isreleased != 0))
		return RC_RFC_RESET;

	if (unlikely(cs_m_l->data.conn_managed.is_reset != 0))
		return RC_RFC_RESET;

	BUG_ON(cs_m_l->data.conn_managed.rcvbuf_consumed >
			cs_m_l->data.conn_managed.rcv_data_len);

	if (cs_m_l->data.conn_managed.rcvbuf_consumed <
			cs_m_l->data.conn_managed.rcv_data_len &&
			likely(cs_m_l->data.conn_managed.shutdown_rd == 0))
		return RC_RFC_OK;

	if (unlikely(cs_m_l->data.conn_managed.trgt_sock == 0))
		return RC_RFC_INCOMPLETE;

	_cor_mngdsocket_readfromconn(cs_m_l);

	if (unlikely(cs_m_l->data.conn_managed.is_reset != 0 ||
			cs_m_l->data.conn_managed.shutdown_rd != 0))
		return RC_RFC_RESET;

	if (unlikely(cs_m_l->data.conn_managed.shutdown_rd != 0))
		return RC_RFC_RESET;

	if (unlikely(cs_m_l->data.conn_managed.rcvd_eof != 0))
		return RC_RFC_EOF;

	if (cs_m_l->data.conn_managed.rcv_buf_state == RCV_BUF_STATE_INCOMPLETE)
		return RC_RFC_INCOMPLETE;

	return RC_RFC_OK;
}
void cor_mngdsocket_readfromconn_wq(struct work_struct *work)
{
	struct cor_sock *cs = container_of(work, struct cor_sock,
			readfromconn_work);
	int rc;
	__u8 data_ready = 0;

	mutex_lock(&cs->lock);

	atomic_set(&cs->readfromconn_work_scheduled, 0);
	barrier();

	if (unlikely(cs->isreleased != 0))
		goto out;

	BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);

	rc = cor_mngdsocket_readfromconn(cs);

	if (rc == RC_RFC_OK && cs->data.conn_managed.rcv_data_len > 0)
		data_ready = 1;

out:
	mutex_unlock(&cs->lock);

	if (data_ready != 0)
		cor_sk_data_ready(cs);

	kref_put(&cs->ref, cor_free_sock);
}
void cor_mngdsocket_readfromconn_fromatomic(struct cor_sock *cs)
{
	if (unlikely(cs == 0))
		return;

	if (atomic_xchg(&cs->readfromconn_work_scheduled, 1) == 0) {
		barrier();
		kref_get(&cs->ref);
		schedule_work(&cs->readfromconn_work);
	}
}
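
/*
 * Copy up to @totallen bytes from the receive buffer to the message
 * iterator. Unless MSG_PEEK was given, the consumed bytes are accounted
 * in rcvbuf_consumed so the next call continues behind them.
 */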
static int _cor_mngdsocket_recvmsg(struct msghdr *msg, __u32 totallen,
		struct cor_sock *cs, int firstrecv, int peek)
{
	int rc = 0;
	int rfc_rc;

	__u32 bufleft;

	__u32 len;

	size_t st_rc;

	mutex_lock(&cs->lock);

	rc = cor_mngdsocket_check_connected(cs);
	if (unlikely(rc != 0))
		goto out;

	rfc_rc = cor_mngdsocket_readfromconn(cs);
	if (unlikely(rfc_rc == RC_RFC_RESET)) {
		rc = -ECONNRESET;
		goto out;
	} else if (unlikely(rfc_rc == RC_RFC_EOF)) {
		rc = 0;
		if (firstrecv)
			cs->data.conn_managed.shutdown_rd = 1;
		goto out;
	} else if (rfc_rc == RC_RFC_INCOMPLETE) {
		rc = -EAGAIN;
		goto out;
	}

	BUG_ON(rfc_rc != RC_RFC_OK);

	BUG_ON(cs->data.conn_managed.rcv_data_len >= 65536);
	BUG_ON(cs->data.conn_managed.rcvbuf_consumed >=
			cs->data.conn_managed.rcv_data_len);
	bufleft = cs->data.conn_managed.rcv_data_len -
			cs->data.conn_managed.rcvbuf_consumed;

	BUG_ON(totallen > 1024 * 1024 * 1024);
	len = totallen;
	if (len > bufleft)
		len = bufleft;

	BUG_ON(len <= 0);
	BUG_ON(cs->data.conn_managed.rcv_buf == 0);

	st_rc = copy_to_iter(cs->data.conn_managed.rcv_buf +
			cs->data.conn_managed.rcvbuf_consumed, len,
			&msg->msg_iter);

	if (unlikely(st_rc != len)) {
		rc = -EFAULT;
		goto out;
	}

	if (likely(peek == 0))
		cs->data.conn_managed.rcvbuf_consumed += (__u16) len;

	rc = len;

out:
	mutex_unlock(&cs->lock);

	return rc;
}
int cor_mngdsocket_recvmsg(struct socket *sock, struct msghdr *msg,
		size_t total_len, int flags)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	int blocking = (flags & MSG_DONTWAIT) == 0;
	int peek = (flags & MSG_PEEK) != 0;

	int rc = 0;
	int copied = 0;
	__u32 max = (1024 * 1024 * 1024);
	__u32 totallen;

	totallen = total_len;
	if (unlikely(totallen > max || total_len > max))
		totallen = max;
	if (unlikely(peek != 0) && totallen > 1)
		totallen = 1;

	while (copied < totallen) {
		rc = _cor_mngdsocket_recvmsg(msg, totallen - copied, cs,
				copied == 0, peek);

		if (rc == -EAGAIN && blocking && copied == 0) {
			long waitret;

			waitret = wait_event_interruptible_timeout(
					*sk_sleep(&cs->sk),
					atomic_read(&cs->ready_to_read) != 0,
					cs->sk.sk_rcvtimeo);

			if (unlikely(waitret < 0))
				rc = sock_intr_errno(cs->sk.sk_rcvtimeo);
			else if (unlikely(waitret == 0))
				rc = -ETIMEDOUT;
			else
				continue;
		}

		if (rc > 0 || copied == 0)
			copied += rc;
		if (unlikely(rc == -EFAULT))
			copied = rc;
		if (rc <= 0)
			break;

		BUG_ON(copied > 0 && ((__u32) copied > totallen));
	}

	BUG_ON(copied > 0 && unlikely((copied > total_len ||
			copied > totallen)));

	return copied;
}
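
/*
 * proto_ops poll: listeners report POLLIN when the accept queue is
 * non-empty; conn-managed sockets report POLLIN when a segment is
 * readable and POLLOUT while the source conn still has send buffer
 * space. Invalid or reset sockets report all events.
 */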
static unsigned int cor_mngdsocket_poll(struct file *file, struct socket *sock,
		poll_table *wait)
{
	unsigned int mask = 0;

	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	sock_poll_wait(file, sock, wait);

	mutex_lock(&cs->lock);

	if (cs->type == CS_TYPE_UNCONNECTED) {
		mask = U32_MAX;
	} else if (cs->type == CS_TYPE_LISTENER) {
		spin_lock_bh(&cor_bindnodes);
		if (unlikely(cs->data.listener.queue_maxlen <= 0))
			mask = U32_MAX;
		else if (list_empty(&cs->data.listener.conn_queue) == 0)
			mask |= (POLLIN | POLLRDNORM);
		spin_unlock_bh(&cor_bindnodes);
	} else if (cs->type == CS_TYPE_CONN_MANAGED) {
		struct cor_conn *src_sock = cs->data.conn_managed.src_sock;

		if (unlikely(unlikely(cs->data.conn_managed.is_reset != 0) ||
				unlikely(cs->data.conn_managed.connect_state ==
				CS_CONNECTSTATE_ERROR))) {
			mask = U32_MAX;
			goto out;
		}

		if (unlikely(cs->data.conn_managed.connect_state !=
				CS_CONNECTSTATE_CONNECTED))
			goto out;

		if (cor_mngdsocket_readfromconn(cs) != RC_RFC_INCOMPLETE)
			mask |= (POLLIN | POLLRDNORM);

		if (unlikely(src_sock == 0)) {
			mask = U32_MAX;
			goto out;
		}

		spin_lock_bh(&src_sock->rcv_lock);
		if (unlikely(src_sock->isreset != 0 ||
				cor_is_src_sock(src_sock, cs) == 0)) {
			mask = U32_MAX;
		} else if (cor_sock_sndbufavailable(src_sock, 1) != 0) {
			mask |= (POLLOUT | POLLWRNORM);
		}
		spin_unlock_bh(&src_sock->rcv_lock);
	} else {
		BUG();
	}

out:
	mutex_unlock(&cs->lock);

	return mask;
}
const struct proto_ops cor_mngd_proto_ops = {
	.family = PF_COR,
	.owner = THIS_MODULE,
	.release = cor_mngdsocket_release,
	.bind = cor_mngdsocket_bind,
	.connect = cor_mngdsocket_connect,
	.accept = cor_mngdsocket_accept,
	.listen = cor_mngdsocket_listen,
	.shutdown = cor_mngdsocket_shutdown,
	.ioctl = cor_mngdsocket_ioctl,
	.setsockopt = cor_mngdsocket_setsockopt,
	.getsockopt = cor_mngdsocket_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cor_mngdsocket_ioctl,
	.compat_setsockopt = cor_mngdsocket_setsockopt,
	.compat_getsockopt = cor_mngdsocket_getsockopt,
#endif
	.sendmsg = cor_mngdsocket_sendmsg,
	.recvmsg = cor_mngdsocket_recvmsg,
	.poll = cor_mngdsocket_poll,
	.socketpair = cor_socket_socketpair,
	.getname = cor_socket_getname,
	.mmap = cor_socket_mmap,

	/* sendpage, splice_read are optional */
};
int cor_create_managed_sock(struct net *net, struct socket *sock, int protocol,
		int kern)
{
	int rc = _cor_createsock(net, sock, protocol, kern, 1);

	if (rc != 0)
		return rc;

	sock->ops = &cor_mngd_proto_ops;

	return 0;
}
/* static void __init test_chksum(void)
{
	char buf[5];
	buf[4] = 0;

	cor_mngdsocket_chksum("12", 2, "3456789", 7, &buf[0], 4);
	printk(KERN_ERR "test_chksum %hhx %hhx %hhx %hhx %hhx\n", buf[0],
			buf[1], buf[2], buf[3], buf[4]); // 83 92 6 e3
	cor_mngdsocket_chksum("123456789", 9, "", 0, &buf[0], 4);
	printk(KERN_ERR "test_chksum %hhx %hhx %hhx %hhx %hhx\n", buf[0],
			buf[1], buf[2], buf[3], buf[4]); // 83 92 6 e3
} */
int __init cor_sock_managed_init1(void)
{
	memset(&cor_sock_cookie_rb, 0, sizeof(cor_sock_cookie_rb));

	/* test_chksum(); */

	return 0;
}

MODULE_LICENSE("GPL");