/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/net.h>
#include <linux/uaccess.h>

#include <linux/crc32c.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

#include "cor.h"
static DEFINE_SPINLOCK(cor_cookie_gen);

static DEFINE_SPINLOCK(cor_sock_cookie_lock);
static struct rb_root cor_sock_cookie_rb;

#warning todo which lock protects sk_err, sk_rcvtimeo and sk_sndtimeo ???
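/*
 * Sockets of type CS_TYPE_CONN_MANAGED are indexed by a random, nonzero
 * 64 bit cookie in cor_sock_cookie_rb. A successful lookup takes a
 * reference on the returned cor_sock; callers drop it with
 * kref_put(&cs->ref, cor_free_sock) (see cor_alloc_corsock_cookie()).
 */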
struct cor_sock *cor_get_sock_by_cookie(__be64 cookie)
{
	struct rb_node *n = 0;
	struct cor_sock *ret = 0;

	spin_lock_bh(&cor_sock_cookie_lock);

	n = cor_sock_cookie_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct cor_sock *cs = container_of(n, struct cor_sock,
				data.conn_managed.rbn);

		BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
		BUG_ON(cs->data.conn_managed.cookie == 0);

		if (cookie < cs->data.conn_managed.cookie)
			n = n->rb_left;
		else if (cookie > cs->data.conn_managed.cookie)
			n = n->rb_right;
		else
			ret = cs;
	}

	if (ret != 0)
		kref_get(&ret->ref);

	spin_unlock_bh(&cor_sock_cookie_lock);

	return ret;
}
static void cor_insert_sock_cookie(struct cor_sock *ins_l)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;
	__u64 cookie = ins_l->data.conn_managed.cookie;

	BUG_ON(ins_l->type != CS_TYPE_CONN_MANAGED);
	BUG_ON(ins_l->data.conn_managed.cookie == 0);

	spin_lock_bh(&cor_sock_cookie_lock);

	root = &cor_sock_cookie_rb;
	p = &root->rb_node;

	while ((*p) != 0) {
		struct cor_sock *curr = container_of(*p,
				struct cor_sock, data.conn_managed.rbn);

		BUG_ON(curr->type != CS_TYPE_CONN_MANAGED);
		BUG_ON(curr->data.conn_managed.cookie == 0);

		parent = *p;
		if (unlikely(cookie == curr->data.conn_managed.cookie)) {
			BUG();
		} else if (cookie < curr->data.conn_managed.cookie) {
			p = &(*p)->rb_left;
		} else if (cookie > curr->data.conn_managed.cookie) {
			p = &(*p)->rb_right;
		}
	}

	kref_get(&ins_l->ref);
	rb_link_node(&ins_l->data.conn_managed.rbn, parent, p);
	rb_insert_color(&ins_l->data.conn_managed.rbn, root);

	spin_unlock_bh(&cor_sock_cookie_lock);
}
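/*
 * Cookie allocation draws random candidates and retries up to 16 times on
 * a collision with an existing socket (or on drawing the reserved value
 * 0). cor_cookie_gen stays held across lookup and insert so no second
 * socket can grab the same cookie in between; returns 0 on success,
 * nonzero if no free cookie was found.
 */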
static int cor_alloc_corsock_cookie(struct cor_sock *cs_m_l)
{
	__be64 cookie;
	int i;

	BUG_ON(cs_m_l->type != CS_TYPE_CONN_MANAGED);
	BUG_ON(cs_m_l->data.conn_managed.cookie != 0);

	spin_lock_bh(&cor_cookie_gen);
	for (i = 0; i < 16; i++) {
		struct cor_sock *cs2 = 0;

		get_random_bytes((char *) &cookie, sizeof(cookie));

		if (unlikely(cookie == 0))
			continue;

		cs2 = cor_get_sock_by_cookie(cookie);
		if (unlikely(cs2 != 0)) {
			kref_put(&cs2->ref, cor_free_sock);
			continue;
		}

		goto found;
	}
	spin_unlock_bh(&cor_cookie_gen);

	return 1;

found:
	cs_m_l->data.conn_managed.cookie = cookie;
	cor_insert_sock_cookie(cs_m_l);
	spin_unlock_bh(&cor_cookie_gen);

	return 0;
}
static void _cor_mngdsocket_shutdown(struct cor_sock *cs_m_l, int flags);
static int cor_mngdsocket_closefinished(struct cor_sock *cs_m)
{
	int rc = 0;

	mutex_lock(&cs_m->lock);
	BUG_ON(cs_m->type != CS_TYPE_CONN_MANAGED);

	if (cs_m->data.conn_managed.rcvd_eof != 0 &&
			cs_m->data.conn_managed.rcvd_rcvend != 0) {
		rc = 1;
	} else if (cs_m->data.conn_managed.src_sock == 0 ||
			cs_m->data.conn_managed.trgt_sock == 0 ||
			cs_m->data.conn_managed.is_reset != 0) {
		rc = 1;
	} else {
		spin_lock_bh(&cs_m->data.conn_managed.src_sock->rcv_lock);
		if (cs_m->data.conn_managed.src_sock->isreset != 0)
			rc = 1;
		spin_unlock_bh(&cs_m->data.conn_managed.src_sock->rcv_lock);
	}

	mutex_unlock(&cs_m->lock);

	return rc;
}
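/*
 * Lingering close: when SO_LINGER is set (and the process is not
 * exiting), cor_mngdsocket_release_mngd() shuts the connection down and
 * then waits here until the peer has acknowledged both directions
 * (rcvd_eof and rcvd_rcvend) or the connection is gone, at most for the
 * configured linger time.
 */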
static void cor_mngdsocket_release_closewait(struct cor_sock *cs_m,
		unsigned long timeout)
{
	while (cor_mngdsocket_closefinished(cs_m) == 0) {
		long waitret;

		if (atomic_read(&cs_m->ready_to_read) != 0)
			atomic_set(&cs_m->ready_to_read, 0);

		waitret = wait_event_interruptible_timeout(
				*sk_sleep(&cs_m->sk),
				atomic_read(&cs_m->ready_to_read) != 0,
				timeout);
		if (waitret <= 0)
			break;
	}
}
static void cor_mngdsocket_release_mngd(struct cor_sock *cs_m)
{
	unsigned long timeout = 0;

	mutex_lock(&cs_m->lock);
	BUG_ON(cs_m->type != CS_TYPE_CONN_MANAGED);

	if (sock_flag(&cs_m->sk, SOCK_LINGER) &&
			!(current->flags & PF_EXITING))
		timeout = cs_m->sk.sk_lingertime;

	_cor_mngdsocket_shutdown(cs_m, SHUT_RDWR);
	mutex_unlock(&cs_m->lock);

	cor_mngdsocket_release_closewait(cs_m, timeout);

	mutex_lock(&cs_m->lock);
	BUG_ON(cs_m->type != CS_TYPE_CONN_MANAGED);

	cs_m->isreleased = 1;

	if (cs_m->data.conn_managed.src_sock != 0 &&
			cs_m->data.conn_managed.trgt_sock != 0) {
		cor_reset_conn(cs_m->data.conn_managed.src_sock);

		cor_conn_kref_put_bug(cs_m->data.conn_managed.src_sock,
				"socket");
		cor_conn_kref_put(cs_m->data.conn_managed.trgt_sock,
				"socket");

		cs_m->data.conn_managed.src_sock = 0;
		cs_m->data.conn_managed.trgt_sock = 0;
	}
	mutex_unlock(&cs_m->lock);

	cor_usersock_release(cs_m);

	mutex_lock(&cs_m->lock);
	BUG_ON(cs_m->type != CS_TYPE_CONN_MANAGED);
	if (cs_m->data.conn_managed.cookie != 0) {
		spin_lock_bh(&cor_sock_cookie_lock);
		rb_erase(&cs_m->data.conn_managed.rbn, &cor_sock_cookie_rb);
		kref_put(&cs_m->ref, cor_kreffree_bug);
		spin_unlock_bh(&cor_sock_cookie_lock);
		cs_m->data.conn_managed.cookie = 0;
	}
	mutex_unlock(&cs_m->lock);
}
int cor_mngdsocket_release(struct socket *sock)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;
	__u8 type;

	/* accept may return before newsock is initialised */
	if (unlikely(cs == 0))
		return 0;

	mutex_lock(&cs->lock);
	type = cs->type;
	if (type != CS_TYPE_CONN_MANAGED)
		cs->isreleased = 1;
	mutex_unlock(&cs->lock);

	if (type == CS_TYPE_UNCONNECTED) {
	} else if (type == CS_TYPE_LISTENER) {
		cor_close_port(cs);
	} else if (type == CS_TYPE_CONN_MANAGED) {
		cor_mngdsocket_release_mngd(cs);
	} else {
		BUG();
	}

	kref_put(&cs->ref, cor_free_sock);

	return 0;
}
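/*
 * A cor_sockaddr must carry sin_family == AF_COR, addr == 0 (binding to a
 * remote address is not supported) and a nonzero big endian port. A
 * hypothetical userspace sketch (field layout as defined in cor.h):
 *
 *	struct cor_sockaddr addr;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.sin_family = AF_COR;
 *	addr.port = htonl(1234);
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 */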
int cor_mngdsocket_bind(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len)
{
	int rc;
	struct cor_sock *cs = (struct cor_sock *) sock->sk;
	struct cor_sockaddr *addr = (struct cor_sockaddr *) saddr;

	if (unlikely(sockaddr_len < sizeof(struct cor_sockaddr)))
		return -EINVAL;

	if (unlikely(addr->sin_family != AF_COR))
		return -EINVAL;

	if (unlikely(be64_to_cpu(addr->addr) != 0))
		return -EINVAL;

	if (unlikely(be32_to_cpu(addr->port) == 0))
		return -EINVAL;

	mutex_lock(&cs->lock);
	if (unlikely(cs->type != CS_TYPE_UNCONNECTED))
		rc = -EINVAL;
	else
		rc = cor_open_port(cs, addr->port);
	mutex_unlock(&cs->lock);

	return rc;
}
static int cor_mngdsocket_init_conn_managed(struct cor_sock *cs_l,
		char *rcvbuf, char *sndbuf)
{
	BUG_ON(cs_l->type != CS_TYPE_CONN_MANAGED);

	memset(&cs_l->data.conn_managed, 0, sizeof(cs_l->data.conn_managed));

	INIT_LIST_HEAD(&cs_l->data.conn_managed.rd_msgs);
	cs_l->data.conn_managed.rcv_buf = rcvbuf;
	cs_l->data.conn_managed.rcv_buf_state = RCV_BUF_STATE_INCOMPLETE;
	cs_l->data.conn_managed.snd_buf = sndbuf;
	cs_l->data.conn_managed.snd_segment_size = CONN_MNGD_MAX_SEGMENT_SIZE;

	return 0;
}
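/*
 * connect() appears to work asynchronously: the socket switches to
 * CS_TYPE_CONN_MANAGED, a cookie is allocated and the connection request
 * is handed off via cor_rdreq_connect(), which is expected to return
 * -EINPROGRESS while the connection is being established. Nonblocking
 * sockets pass -EINPROGRESS on to the caller; blocking sockets wait on
 * ready_to_write and report the outcome through sock_error().
 */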
static int cor_mngdsocket_connect(struct socket *sock,
		struct sockaddr *saddr, int sockaddr_len, int flags)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;
	struct cor_sockaddr *addr = (struct cor_sockaddr *) saddr;

	char *rcvbuf;
	char *sndbuf;

	int rc;
	long waitret;

	if (unlikely(sockaddr_len < sizeof(struct cor_sockaddr)))
		return -EINVAL;

	if (unlikely(addr->sin_family != AF_COR))
		return -EINVAL;

	rcvbuf = kmalloc(CONN_MNGD_MAX_SEGMENT_SIZE, GFP_KERNEL);
	if (unlikely(rcvbuf == 0))
		return -ENOMEM;

	sndbuf = kmalloc(CONN_MNGD_MAX_SEGMENT_SIZE, GFP_KERNEL);
	if (unlikely(sndbuf == 0)) {
		kfree(rcvbuf);
		return -ENOMEM;
	}

	mutex_lock(&cs->lock);
	if (unlikely(cs->type != CS_TYPE_UNCONNECTED)) {
		mutex_unlock(&cs->lock);
		kfree(rcvbuf);
		kfree(sndbuf);
		return -EISCONN;
	}

	cs->type = CS_TYPE_CONN_MANAGED;
	rc = cor_mngdsocket_init_conn_managed(cs, rcvbuf, sndbuf);
	if (likely(rc == 0))
		rc = cor_alloc_corsock_cookie(cs);
	if (unlikely(rc != 0)) {
		cs->data.conn_managed.rcv_buf = 0;
		cs->data.conn_managed.snd_buf = 0;
		cs->type = CS_TYPE_UNCONNECTED;
		mutex_unlock(&cs->lock);
		kfree(rcvbuf);
		kfree(sndbuf);
		return -ENOMEM;
	}

	memcpy(&cs->data.conn_managed.remoteaddr, addr,
			sizeof(struct cor_sockaddr));

	cs->data.conn_managed.connect_state = CS_CONNECTSTATE_CONNECTING;

	mutex_unlock(&cs->lock);

	lock_sock(&cs->sk);
	sock->state = SS_CONNECTING;
	release_sock(&cs->sk);

	rc = cor_rdreq_connect(cs);

	if (unlikely(rc != -EINPROGRESS)) {
		mutex_lock(&cs->lock);
		cs->data.conn_managed.connect_state =
				CS_CONNECTSTATE_ERROR;
		mutex_unlock(&cs->lock);
		return rc;
	}

	if ((sock->file->f_flags & O_NONBLOCK) != 0)
		return -EINPROGRESS;

	mutex_lock(&cs->lock);
	if (cs->data.conn_managed.connect_state !=
			CS_CONNECTSTATE_CONNECTING) {
		mutex_unlock(&cs->lock);
		return sock_error(&cs->sk);
	}

	atomic_set(&cs->ready_to_write, 0);

	mutex_unlock(&cs->lock);

	waitret = wait_event_interruptible_timeout(
			*sk_sleep(&cs->sk),
			atomic_read(&cs->ready_to_write) != 0,
			cs->sk.sk_sndtimeo);

	if (unlikely(waitret < 0))
		return sock_intr_errno(cs->sk.sk_sndtimeo);
	else if (unlikely(waitret == 0))
		return -ETIMEDOUT;

	return sock_error(&cs->sk);
}
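/*
 * Incoming connections are queued on the listener by the receive path
 * (conn_queue, protected by cor_bindnodes). accept() pops the oldest
 * src/trgt conn pair, creates a new cor_sock via _cor_createsock() and
 * wires both conn directions to it before marking it connected.
 */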
int cor_mngdsocket_accept(struct socket *sock, struct socket *newsock,
		int flags, bool kern)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	int rc;

	struct cor_conn *src_sock_o;
	struct cor_conn *trgt_sock_o;

	struct cor_sock *newcs;

	char *rcvbuf;
	char *sndbuf;

	rcvbuf = kmalloc(CONN_MNGD_MAX_SEGMENT_SIZE, GFP_KERNEL);
	if (unlikely(rcvbuf == 0))
		return -ENOMEM;

	sndbuf = kmalloc(CONN_MNGD_MAX_SEGMENT_SIZE, GFP_KERNEL);
	if (unlikely(sndbuf == 0)) {
		kfree(rcvbuf);
		return -ENOMEM;
	}

	mutex_lock(&cs->lock);

	BUG_ON(cs->type != CS_TYPE_UNCONNECTED &&
			cs->type != CS_TYPE_LISTENER &&
			cs->type != CS_TYPE_CONN_MANAGED);

	if (unlikely(cs->type != CS_TYPE_LISTENER)) {
		mutex_unlock(&cs->lock);
		kfree(rcvbuf);
		kfree(sndbuf);
		return -EINVAL;
	}

	spin_lock_bh(&cor_bindnodes);
	if (unlikely(cs->data.listener.queue_maxlen <= 0)) {
		spin_unlock_bh(&cor_bindnodes);
		mutex_unlock(&cs->lock);
		kfree(rcvbuf);
		kfree(sndbuf);
		return -EINVAL;
	}

	while (list_empty(&cs->data.listener.conn_queue)) {
		atomic_set(&cs->ready_to_accept, 0);
		spin_unlock_bh(&cor_bindnodes);
		mutex_unlock(&cs->lock);

		if ((flags & O_NONBLOCK) != 0) {
			kfree(rcvbuf);
			kfree(sndbuf);
			return -EAGAIN;
		}

		if (wait_event_interruptible(*sk_sleep(&cs->sk),
				atomic_read(&cs->ready_to_accept) != 0) !=
				0) {
			kfree(rcvbuf);
			kfree(sndbuf);
			return -ERESTARTSYS;
		}

		mutex_lock(&cs->lock);
		spin_lock_bh(&cor_bindnodes);
	}

	src_sock_o = container_of(cs->data.listener.conn_queue.next,
			struct cor_conn, src.sock.cl_list);

	BUG_ON(src_sock_o->src.sock.in_cl_list == 0);
	list_del(&src_sock_o->src.sock.cl_list);
	src_sock_o->src.sock.in_cl_list = 0;

	cs->data.listener.queue_len--;

	spin_unlock_bh(&cor_bindnodes);
	mutex_unlock(&cs->lock);

	spin_lock_bh(&src_sock_o->rcv_lock);
	trgt_sock_o = cor_get_conn_reversedir(src_sock_o);
	spin_unlock_bh(&src_sock_o->rcv_lock);

	/* kern = 0 - ugly, but af_unix does it too... */
	rc = _cor_createsock(sock_net(sock->sk), newsock,
			cs->sk.sk_protocol, 0, 0);

	if (unlikely(rc != 0)) {
		cor_reset_conn(src_sock_o);
		cor_conn_kref_put(src_sock_o, "conn_queue");
		kfree(rcvbuf);
		kfree(sndbuf);
		printk(KERN_ERR "cor: _cor_createsock() failed, connection reset\n");
		return rc;
	}

	newcs = (struct cor_sock *) newsock->sk;

	if (unlikely(newcs == 0)) {
		cor_reset_conn(src_sock_o);
		cor_conn_kref_put(src_sock_o, "conn_queue");
		kfree(rcvbuf);
		kfree(sndbuf);
		printk(KERN_ERR "cor: newsock->sk is null, connection reset\n");
		return -ENOMEM;
	}

	mutex_lock(&newcs->lock);
	spin_lock_bh(&trgt_sock_o->rcv_lock);
	spin_lock_bh(&src_sock_o->rcv_lock);

	BUG_ON(trgt_sock_o->is_client == 0);
	BUG_ON(src_sock_o->is_client != 0);

	BUG_ON(trgt_sock_o->targettype != TARGET_SOCK);
	BUG_ON(src_sock_o->sourcetype != SOURCE_SOCK);

	BUG_ON(newcs->type != CS_TYPE_UNCONNECTED);

	newcs->type = CS_TYPE_CONN_MANAGED;
	rc = cor_mngdsocket_init_conn_managed(newcs, rcvbuf, sndbuf);
	if (unlikely(rc != 0)) {
		spin_unlock_bh(&src_sock_o->rcv_lock);
		spin_unlock_bh(&trgt_sock_o->rcv_lock);
		mutex_unlock(&newcs->lock);
		cor_reset_conn(src_sock_o);
		cor_conn_kref_put(src_sock_o, "conn_queue");
		kfree(rcvbuf);
		kfree(sndbuf);
		printk(KERN_ERR "cor: cor_mngdsocket_init_conn_managed() failed, connection reset\n");
		return rc;
	}

	newcs->data.conn_managed.src_sock = src_sock_o;
	newcs->data.conn_managed.trgt_sock = trgt_sock_o;
	cor_conn_kref_get(src_sock_o, "socket");
	cor_conn_kref_get(trgt_sock_o, "socket");

	/* we will notice reset conns when we try to use them */
	if (likely(src_sock_o->isreset == 0)) {
		src_sock_o->src.sock.ed->cs = newcs;
		trgt_sock_o->trgt.sock.cs = newcs;
		kref_get(&newcs->ref);
		kref_get(&newcs->ref);

		BUG_ON(newcs->data.conn_managed.rcv_buf == 0);
		src_sock_o->src.sock.socktype = SOCKTYPE_MANAGED;
		trgt_sock_o->trgt.sock.socktype = SOCKTYPE_MANAGED;
		trgt_sock_o->trgt.sock.rcv_buf_state =
				RCV_BUF_STATE_INCOMPLETE;
		trgt_sock_o->trgt.sock.rcv_buf =
				newcs->data.conn_managed.rcv_buf;
		trgt_sock_o->trgt.sock.rcvd = 0;

		BUG_ON(src_sock_o->src.sock.keepalive_intransit != 0);
		src_sock_o->src.sock.ed->jiffies_keepalive_lastact =
				jiffies - KEEPALIVE_INTERVAL_SECS * HZ + HZ;
		cor_keepalive_req_sched_timer(src_sock_o);
	}

	newcs->data.conn_managed.connect_state = CS_CONNECTSTATE_CONNECTED;

	spin_unlock_bh(&src_sock_o->rcv_lock);
	spin_unlock_bh(&trgt_sock_o->rcv_lock);
	mutex_unlock(&newcs->lock);

	newsock->ops = sock->ops;
	newsock->sk = (struct sock *) newcs;
	newsock->state = SS_CONNECTED;

	cor_conn_kref_put(src_sock_o, "conn_queue");

	return 0;
}
int cor_mngdsocket_listen(struct socket *sock, int len)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	mutex_lock(&cs->lock);
	spin_lock_bh(&cor_bindnodes);

	BUG_ON(cs->type != CS_TYPE_UNCONNECTED &&
			cs->type != CS_TYPE_LISTENER &&
			cs->type != CS_TYPE_CONN_MANAGED);

	if (unlikely(cs->type != CS_TYPE_LISTENER)) {
		spin_unlock_bh(&cor_bindnodes);
		mutex_unlock(&cs->lock);
		return -EOPNOTSUPP;
	}

	cs->data.listener.queue_maxlen = len;

	spin_unlock_bh(&cor_bindnodes);
	mutex_unlock(&cs->lock);

	return 0;
}
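/*
 * Shutdown maps the POSIX semantics onto two control messages: an EOF for
 * the write side (SHUT_WR) and a RCVEND for the read side (SHUT_RD). Each
 * is sent at most once (sent_eof/sent_rcvend) and flushed to the conn via
 * cor_mngdsocket_flushtoconn_ctrl().
 */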
static void _cor_mngdsocket_shutdown(struct cor_sock *cs_m_l, int flags)
{
	__u8 send_eof = 0;
	__u8 send_rcvend = 0;

	BUG_ON(cs_m_l->type != CS_TYPE_CONN_MANAGED);

	if (flags == SHUT_RD || flags == SHUT_RDWR) {
		if (cs_m_l->data.conn_managed.sent_rcvend == 0) {
			send_rcvend = 1;
			cs_m_l->data.conn_managed.sent_rcvend = 1;
		}

		cs_m_l->data.conn_managed.shutdown_rd = 1;
	}

	if (flags == SHUT_WR || flags == SHUT_RDWR) {
		if (cs_m_l->data.conn_managed.sent_eof == 0) {
			send_eof = 1;
			cs_m_l->data.conn_managed.sent_eof = 1;
		}

		cs_m_l->data.conn_managed.shutdown_wr = 1;

		cs_m_l->data.conn_managed.flush = 1;
	}

	if (send_eof != 0 || send_rcvend != 0)
		cor_mngdsocket_flushtoconn_ctrl(cs_m_l, send_eof, send_rcvend,
				0, 0);
}
int cor_mngdsocket_shutdown(struct socket *sock, int flags)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	mutex_lock(&cs->lock);

	BUG_ON(cs->type != CS_TYPE_UNCONNECTED &&
			cs->type != CS_TYPE_LISTENER &&
			cs->type != CS_TYPE_CONN_MANAGED);

	if (unlikely(cs->type == CS_TYPE_UNCONNECTED)) {
		mutex_unlock(&cs->lock);
		return -ENOTCONN;
	} else if (unlikely(cs->type != CS_TYPE_CONN_MANAGED)) {
		mutex_unlock(&cs->lock);
		return -EOPNOTSUPP;
	}

	_cor_mngdsocket_shutdown(cs, flags);

	mutex_unlock(&cs->lock);

	return 0;
}
int cor_mngdsocket_ioctl(struct socket *sock, unsigned int cmd,
		unsigned long arg)
{
	return -ENOIOCTLCMD;
}
static int cor_mngdsocket_setsockopt_publishservice(struct socket *sock,
		char __user *optval, unsigned int optlen)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;
	int notread;
	__u32 publish;

	if (unlikely(optlen != 4))
		return -EINVAL;

	notread = copy_from_user(&publish, optval, 4);
	if (unlikely(notread != 0))
		return -EFAULT;

	if (publish != 0 && publish != 1)
		return -EINVAL;

	cor_set_publish_service(cs, (__u8) publish);

	return 0;
}
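/*
 * Options live at the protocol level SOL_COR. A hypothetical userspace
 * sketch for publishing a service:
 *
 *	__u32 publish = 1;
 *
 *	setsockopt(fd, SOL_COR, COR_PUBLISH_SERVICE, &publish,
 *			sizeof(publish));
 */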
int cor_mngdsocket_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	if (unlikely(level != SOL_COR))
		return -ENOPROTOOPT;

	if (optname == COR_PUBLISH_SERVICE) {
		return cor_mngdsocket_setsockopt_publishservice(sock, optval,
				optlen);
	} else if (optname == COR_TOS) {
		return cor_socket_setsockopt_tos(sock, optval, optlen);
	} else if (optname == COR_PRIORITY) {
		return cor_socket_setsockopt_priority(sock, optval, optlen);
	} else {
		return -ENOPROTOOPT;
	}
}
int cor_mngdsocket_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen)
{
	return -ENOPROTOOPT;
}
void __cor_set_sock_connecterror(struct cor_sock *cs_m_l, int errorno)
{
	BUG_ON(cs_m_l->type != CS_TYPE_CONN_MANAGED);

	if (unlikely(unlikely(cs_m_l->isreleased != 0) ||
			unlikely(cs_m_l->data.conn_managed.connect_state !=
			CS_CONNECTSTATE_CONNECTING)))
		return;

	cs_m_l->data.conn_managed.connect_state = CS_CONNECTSTATE_ERROR;

	lock_sock(&cs_m_l->sk);
	xchg(&cs_m_l->sk.sk_err, errorno);
	release_sock(&cs_m_l->sk);

	atomic_set(&cs_m_l->ready_to_read, 1);
	atomic_set(&cs_m_l->ready_to_write, 1);
	atomic_set(&cs_m_l->ready_to_accept, 1);

	cs_m_l->sk.sk_state_change(&cs_m_l->sk);
}
void _cor_set_sock_connecterror(struct cor_sock *cs, int errorno)
{
	BUG_ON(errorno == 0);

	mutex_lock(&cs->lock);
	__cor_set_sock_connecterror(cs, errorno);
	mutex_unlock(&cs->lock);
}
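/*
 * Segment checksums are CRC32C over the layer 4 header followed by the
 * payload, stored as a 4 byte value via cor_put_u32(). The commented-out
 * test_chksum() at the bottom of this file keeps two reference vectors.
 */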
void cor_mngdsocket_chksum(char *hdr, __u32 hdrlen,
		char *data, __u32 datalen,
		char *chksum, __u32 chksum_len)
{
	__u32 crc = 0;

	BUG_ON(chksum_len != 4);

	crc = crc32c(crc, hdr, hdrlen);
	crc = crc32c(crc, data, datalen);

	cor_put_u32(chksum, crc);
}
static int cor_mngdsocket_check_connected(struct cor_sock *cs_l)
{
	BUG_ON(cs_l->type != CS_TYPE_UNCONNECTED &&
			cs_l->type != CS_TYPE_LISTENER &&
			cs_l->type != CS_TYPE_CONN_MANAGED);
	if (unlikely(cs_l->type == CS_TYPE_UNCONNECTED)) {
		return -ENOTCONN;
	} else if (unlikely(cs_l->type != CS_TYPE_CONN_MANAGED)) {
		return -EOPNOTSUPP;
	} else if (unlikely(cs_l->data.conn_managed.connect_state !=
			CS_CONNECTSTATE_CONNECTED)) {
		return -ENOTCONN;
	}

	if (unlikely(cs_l->data.conn_managed.is_reset != 0))
		return -ECONNRESET;

	return 0;
}
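/*
 * Send segments scale with the measured send speed, presumably so that
 * slow connections fill a segment in reasonable time. Thresholds as
 * coded below:
 *
 *	speed >= 1228800 B/s -> 4096 byte segments
 *	speed >=  409600 B/s -> 2048
 *	speed >=  102400 B/s -> 1024
 *	speed >=   20480 B/s ->  512
 *	speed >=    2560 B/s ->  256
 *	otherwise            ->  128
 *
 * each minus the layer 4 overhead.
 */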
static __u32 cor_get_segment_size(__u32 sndspeed_limited, __u8 is_highlatency,
		__u32 l4overhead)
{
	if (sndspeed_limited >= 4096 * 300)
		return 4096 - l4overhead;
	else if (sndspeed_limited >= 2048 * 200)
		return 2048 - l4overhead;
	else if (sndspeed_limited >= 1024 * 100)
		return 1024 - l4overhead;
	else if (sndspeed_limited >= 512 * 40)
		return 512 - l4overhead;
	else if (sndspeed_limited >= 256 * 10)
		return 256 - l4overhead;

	return 128 - l4overhead;
}
static int __cor_mngdsocket_sendmsg(struct msghdr *msg, __u32 totallen,
		struct cor_sock *cs_m_l, __u32 sndspeed_limited)
{
	__u32 len = totallen;
	__u32 bufleft;
	size_t st_rc;

	if (cs_m_l->data.conn_managed.snd_data_len == 0)
		cs_m_l->data.conn_managed.snd_segment_size =
				cor_get_segment_size(sndspeed_limited,
				cs_m_l->is_highlatency, 6);

	BUG_ON(cs_m_l->data.conn_managed.snd_segment_size >
			CONN_MNGD_MAX_SEGMENT_SIZE);

	BUG_ON(totallen > (1024 * 1024 * 1024));

	BUG_ON(cs_m_l->type != CS_TYPE_CONN_MANAGED);
	BUG_ON(cs_m_l->data.conn_managed.send_in_progress != 0);

	BUG_ON(cs_m_l->data.conn_managed.snd_segment_size <=
			cs_m_l->data.conn_managed.snd_data_len);

	bufleft = cs_m_l->data.conn_managed.snd_segment_size -
			cs_m_l->data.conn_managed.snd_data_len;

	if (len > bufleft)
		len = bufleft;

	BUG_ON(len >= 65536);

	st_rc = copy_from_iter(cs_m_l->data.conn_managed.snd_buf +
			cs_m_l->data.conn_managed.snd_data_len, len,
			&msg->msg_iter);

	if (unlikely(st_rc != len))
		return -EFAULT;

	cs_m_l->data.conn_managed.snd_data_len += (__u16) len;

	return len;
}
static int _cor_mngdsocket_sendmsg(struct msghdr *msg, __u32 totallen,
		__u32 *iovidx, __u32 *iovread, struct cor_sock *cs,
		__u8 flush)
{
	int rc = 0;
	struct cor_conn *src_sock;
	__u32 sndspeed_limited;

	mutex_lock(&cs->lock);

	rc = cor_mngdsocket_check_connected(cs);
	if (unlikely(rc != 0))
		goto out;

	if (unlikely(cs->data.conn_managed.shutdown_wr != 0)) {
		rc = -EPIPE;
		goto out;
	}

	BUG_ON(cs->data.conn_managed.snd_data_len >
			cs->data.conn_managed.snd_segment_size);

	if (unlikely(cs->data.conn_managed.send_in_progress != 0 ||
			cs->data.conn_managed.snd_data_len ==
			cs->data.conn_managed.snd_segment_size)) {
		cs->data.conn_managed.flush = 0;
		cor_mngdsocket_flushtoconn_data(cs);
		if (cs->data.conn_managed.send_in_progress != 0 ||
				cs->data.conn_managed.snd_data_len ==
				cs->data.conn_managed.snd_segment_size) {
			rc = -EAGAIN;
			goto out;
		}
	}

	src_sock = cs->data.conn_managed.src_sock;
	if (unlikely(src_sock == 0)) {
		rc = -EPIPE;
		goto out;
	}

	spin_lock_bh(&src_sock->rcv_lock);
	if (unlikely(src_sock->isreset != 0 ||
			cor_is_src_sock(src_sock, cs) == 0)) {
		rc = -EPIPE;
		spin_unlock_bh(&src_sock->rcv_lock);
		goto out;
	} else if (cor_sock_sndbufavailable(src_sock, 0) == 0) {
		rc = -EAGAIN;
		atomic_set(&cs->ready_to_write, 0);
		spin_unlock_bh(&src_sock->rcv_lock);
		goto out;
	}

	sndspeed_limited = src_sock->src.sock.ed->snd_speed.speed_limited;

	spin_unlock_bh(&src_sock->rcv_lock);

	rc = __cor_mngdsocket_sendmsg(msg, totallen, cs, sndspeed_limited);

	cs->data.conn_managed.flush = flush;
	if (unlikely(likely(rc > 0) && unlikely(rc != totallen)))
		cs->data.conn_managed.flush = 0;

	if (flush != 0 || cs->data.conn_managed.snd_data_len ==
			cs->data.conn_managed.snd_segment_size) {
		cor_mngdsocket_flushtoconn_data(cs);
	}

out:
	mutex_unlock(&cs->lock);

	return rc;
}
int cor_mngdsocket_sendmsg(struct socket *sock, struct msghdr *msg,
		size_t total_len)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	__u8 flush = ((msg->msg_flags & MSG_MORE) == 0) ? 1 : 0;
	int blocking = (msg->msg_flags & MSG_DONTWAIT) == 0;

	int rc = 0;
	__u32 totallen;
	__u32 copied = 0;
	__u32 iovidx = 0;
	__u32 iovread = 0;
	__u32 max = (1024 * 1024 * 1024);

	totallen = total_len;
	if (unlikely(totallen > max || total_len > max)) {
		return -EMSGSIZE;
	}

	while (rc >= 0 && copied < totallen) {
		rc = _cor_mngdsocket_sendmsg(msg, totallen - copied, &iovidx,
				&iovread, cs, flush);

		if (rc == -EAGAIN && blocking) {
			long waitret;

			waitret = wait_event_interruptible_timeout(
					*sk_sleep(&cs->sk),
					atomic_read(&cs->ready_to_write) != 0,
					cs->sk.sk_sndtimeo);

			if (unlikely(waitret < 0))
				rc = sock_intr_errno(cs->sk.sk_sndtimeo);
			else if (unlikely(waitret == 0))
				rc = -ETIMEDOUT;
			else
				rc = 0;
		}

		if (rc > 0)
			copied += rc;
	}

	if (unlikely(rc == -EFAULT))
		return -EFAULT;

	BUG_ON(copied > 0 && unlikely((copied > total_len ||
			copied > totallen)));

	if (copied > 0)
		return copied;

	return rc;
}
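/*
 * Receive side: each segment arrives with rcv_hdr_flags describing its
 * content. CONN_MNGD_HASDATA segments hand their payload to the socket;
 * control segments may carry EOF, RCVEND and keepalive request/response
 * cookies, which are parsed here under trgt_sock's rcv_lock and acted
 * upon later without the lock held.
 */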
static void __cor_mngdsocket_readfromconn(struct cor_sock *cs_m_l,
		struct cor_conn *trgt_sock_l,
		__u8 *send_eof, __u8 *send_rcvend,
		__u8 *keepalive_req_rcvd,
		__be32 *keepalive_req_cookie,
		__u8 *keepalive_resp_rcvd,
		__be32 *keepalive_resp_cookie)
{
	__u16 rcvbuf_consumed = 0;

	BUG_ON(trgt_sock_l->trgt.sock.rcv_buf_state != RCV_BUF_STATE_OK);

	if (likely((trgt_sock_l->trgt.sock.rcv_hdr_flags &
			CONN_MNGD_HASDATA) != 0)) {
		if (unlikely(trgt_sock_l->trgt.sock.rcv_data_len == 0 ||
				cs_m_l->data.conn_managed.shutdown_rd != 0))
			cs_m_l->data.conn_managed.rcv_data_len = 0;
		else
			cs_m_l->data.conn_managed.rcv_data_len =
					trgt_sock_l->trgt.sock.rcv_data_len;
		cs_m_l->data.conn_managed.rcv_buf_state = RCV_BUF_STATE_OK;

		return;
	}

	if (unlikely((trgt_sock_l->trgt.sock.rcv_hdr_flags &
			CONN_MNGD_EOF) != 0)) {
		if (cs_m_l->data.conn_managed.sent_rcvend == 0) {
			*send_rcvend = 1;
			cs_m_l->data.conn_managed.sent_rcvend = 1;
		}

		cs_m_l->data.conn_managed.rcvd_eof = 1;
	}

	if (unlikely((trgt_sock_l->trgt.sock.rcv_hdr_flags &
			CONN_MNGD_RCVEND) != 0)) {
		if (cs_m_l->data.conn_managed.sent_eof == 0) {
			*send_eof = 1;
			cs_m_l->data.conn_managed.sent_eof = 1;
		}

		cs_m_l->data.conn_managed.rcvd_rcvend = 1;
	}

	if (unlikely((trgt_sock_l->trgt.sock.rcv_hdr_flags &
			CONN_MNGD_KEEPALIVE_REQ) != 0)) {
		BUG_ON(rcvbuf_consumed + 4 >
				trgt_sock_l->trgt.sock.rcv_data_len);

		*keepalive_req_cookie = cor_parse_be32(
				trgt_sock_l->trgt.sock.rcv_buf +
				rcvbuf_consumed);
		rcvbuf_consumed += 4;

		*keepalive_req_rcvd = 1;
	}

	if (unlikely((trgt_sock_l->trgt.sock.rcv_hdr_flags &
			CONN_MNGD_KEEPALIVE_RESP) != 0)) {
		BUG_ON(rcvbuf_consumed + 4 >
				trgt_sock_l->trgt.sock.rcv_data_len);

		*keepalive_resp_cookie = cor_parse_be32(
				trgt_sock_l->trgt.sock.rcv_buf +
				rcvbuf_consumed);
		rcvbuf_consumed += 4;

		*keepalive_resp_rcvd = 1;
	}

	BUG_ON(rcvbuf_consumed != trgt_sock_l->trgt.sock.rcv_data_len);

	trgt_sock_l->trgt.sock.rcv_buf_state = RCV_BUF_STATE_INCOMPLETE;
	trgt_sock_l->trgt.sock.rcvd = 0;
}
static void _cor_mngdsocket_readfromconn(struct cor_sock *cs_m_l)
{
	__u8 do_wake_sender = 0;
	int reset_needed = 0;
	__u8 send_eof = 0;
	__u8 send_rcvend = 0;
	__u8 keepalive_req_rcvd = 0;
	__be32 keepalive_req_cookie = 0;
	__u8 keepalive_resp_rcvd = 0;
	__be32 keepalive_resp_cookie = 0;

	struct cor_conn *trgt_sock = cs_m_l->data.conn_managed.trgt_sock;

	spin_lock_bh(&trgt_sock->rcv_lock);

	if (unlikely(cor_is_trgt_sock(trgt_sock, cs_m_l) == 0))
		goto out;

	cs_m_l->is_highlatency = trgt_sock->is_highlatency;

	if (unlikely(trgt_sock->isreset != 0)) {
		reset_needed = 1;
		goto out;
	}

	BUG_ON(trgt_sock->trgt.sock.socktype != SOCKTYPE_MANAGED);
	BUG_ON(trgt_sock->trgt.sock.rcv_buf == 0);
	BUG_ON(trgt_sock->trgt.sock.rcv_buf !=
			cs_m_l->data.conn_managed.rcv_buf);

	if (cs_m_l->data.conn_managed.rcv_buf_state == RCV_BUF_STATE_OK) {
		cs_m_l->data.conn_managed.rcv_data_len = 0;
		cs_m_l->data.conn_managed.rcvbuf_consumed = 0;
		cs_m_l->data.conn_managed.rcv_buf_state =
				RCV_BUF_STATE_INCOMPLETE;

		trgt_sock->trgt.sock.rcv_buf_state =
				RCV_BUF_STATE_INCOMPLETE;
		trgt_sock->trgt.sock.rcvd = 0;
	}

	while (cs_m_l->data.conn_managed.rcv_buf_state ==
			RCV_BUF_STATE_INCOMPLETE) {
		cor_flush_sock_managed(trgt_sock, 1, &do_wake_sender);

		if (trgt_sock->trgt.sock.rcv_buf_state ==
				RCV_BUF_STATE_INCOMPLETE) {
			break;
		} else if (unlikely(trgt_sock->trgt.sock.rcv_buf_state ==
				RCV_BUF_STATE_RESET)) {
			reset_needed = 1;
			break;
		}

		BUG_ON(trgt_sock->trgt.sock.rcv_buf_state !=
				RCV_BUF_STATE_OK);

		__cor_mngdsocket_readfromconn(cs_m_l, trgt_sock,
				&send_eof, &send_rcvend,
				&keepalive_req_rcvd,
				&keepalive_req_cookie,
				&keepalive_resp_rcvd,
				&keepalive_resp_cookie);
	}

	if (unlikely(cs_m_l->data.conn_managed.rcvd_eof != 0 &&
			cs_m_l->data.conn_managed.rcvd_rcvend != 0 &&
			trgt_sock->is_client == 0))
		reset_needed = 1;

out:
	spin_unlock_bh(&trgt_sock->rcv_lock);

	if (unlikely(reset_needed)) {
		cor_reset_conn(trgt_sock);

		cor_conn_kref_put_bug(cs_m_l->data.conn_managed.src_sock,
				"socket");
		cor_conn_kref_put(cs_m_l->data.conn_managed.trgt_sock,
				"socket");

		cs_m_l->data.conn_managed.src_sock = 0;
		cs_m_l->data.conn_managed.trgt_sock = 0;

		cs_m_l->data.conn_managed.is_reset = 1;
		cor_sk_data_ready(cs_m_l);
		cor_sk_write_space(cs_m_l);
	} else if (do_wake_sender != 0) {
		cor_wake_sender(trgt_sock);
	}

	if (unlikely(send_eof != 0 || send_rcvend != 0 ||
			keepalive_req_rcvd != 0)) {
		cor_mngdsocket_flushtoconn_ctrl(cs_m_l, send_eof,
				send_rcvend, keepalive_req_rcvd,
				keepalive_req_cookie);
	}

	if (unlikely(keepalive_resp_rcvd != 0))
		cor_keepalive_resp_rcvd(cs_m_l, keepalive_resp_cookie);
}
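/*
 * Return codes of cor_mngdsocket_readfromconn(): RC_RFC_OK means readable
 * data is buffered, RC_RFC_INCOMPLETE means no complete segment yet (the
 * caller should wait or poll again), RC_RFC_EOF and RC_RFC_RESET end the
 * read side.
 */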
#define RC_RFC_OK 0
#define RC_RFC_INCOMPLETE 1
#define RC_RFC_EOF 2
#define RC_RFC_RESET 3
int cor_mngdsocket_readfromconn(struct cor_sock *cs_m_l)
{
	BUG_ON(cs_m_l->type != CS_TYPE_CONN_MANAGED);

	if (unlikely(cs_m_l->isreleased != 0))
		return RC_RFC_RESET;

	if (unlikely(cs_m_l->data.conn_managed.is_reset != 0))
		return RC_RFC_RESET;

	BUG_ON(cs_m_l->data.conn_managed.rcvbuf_consumed >
			cs_m_l->data.conn_managed.rcv_data_len);

	if (cs_m_l->data.conn_managed.rcvbuf_consumed <
			cs_m_l->data.conn_managed.rcv_data_len &&
			likely(cs_m_l->data.conn_managed.shutdown_rd == 0))
		return RC_RFC_OK;

	if (unlikely(cs_m_l->data.conn_managed.trgt_sock == 0))
		return RC_RFC_INCOMPLETE;

	_cor_mngdsocket_readfromconn(cs_m_l);

	if (unlikely(cs_m_l->data.conn_managed.is_reset != 0 ||
			cs_m_l->data.conn_managed.shutdown_rd != 0))
		return RC_RFC_RESET;

	if (unlikely(cs_m_l->data.conn_managed.rcvd_eof != 0))
		return RC_RFC_EOF;

	if (cs_m_l->data.conn_managed.rcv_buf_state == RCV_BUF_STATE_INCOMPLETE)
		return RC_RFC_INCOMPLETE;

	return RC_RFC_OK;
}
void cor_mngdsocket_readfromconn_wq(struct work_struct *work)
{
	struct cor_sock *cs = container_of(work, struct cor_sock,
			readfromconn_work);
	int rc;
	__u8 data_ready = 0;

	mutex_lock(&cs->lock);

	atomic_set(&cs->readfromconn_work_scheduled, 0);

	if (unlikely(cs->isreleased != 0))
		goto out;

	BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);

	rc = cor_mngdsocket_readfromconn(cs);

	if (rc == RC_RFC_OK && cs->data.conn_managed.rcv_data_len > 0)
		data_ready = 1;

out:
	mutex_unlock(&cs->lock);

	if (data_ready != 0)
		cor_sk_data_ready(cs);

	kref_put(&cs->ref, cor_free_sock);
}
void cor_mngdsocket_readfromconn_fromatomic(struct cor_sock *cs)
{
	if (unlikely(cs == 0))
		return;

	if (atomic_xchg(&cs->readfromconn_work_scheduled, 1) == 0) {
		kref_get(&cs->ref);
		schedule_work(&cs->readfromconn_work);
	}
}
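/*
 * recvmsg copies out of the single shared receive buffer;
 * rcvbuf_consumed tracks how far the application has read. MSG_PEEK
 * leaves rcvbuf_consumed untouched, so a peek does not advance the
 * stream.
 */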
static int _cor_mngdsocket_recvmsg(struct msghdr *msg, __u32 totallen,
		struct cor_sock *cs, int firstrecv, int peek)
{
	int rc = 0;
	int rfc_rc;
	__u32 len = totallen;
	__u32 bufleft;
	size_t st_rc;

	mutex_lock(&cs->lock);

	rc = cor_mngdsocket_check_connected(cs);
	if (unlikely(rc != 0))
		goto out;

	rfc_rc = cor_mngdsocket_readfromconn(cs);
	if (unlikely(rfc_rc == RC_RFC_RESET)) {
		rc = -ECONNRESET;
		goto out;
	} else if (unlikely(rfc_rc == RC_RFC_EOF)) {
		cs->data.conn_managed.shutdown_rd = 1;
		rc = 0;
		goto out;
	} else if (rfc_rc == RC_RFC_INCOMPLETE) {
		rc = -EAGAIN;
		goto out;
	}

	BUG_ON(rfc_rc != RC_RFC_OK);

	BUG_ON(cs->data.conn_managed.rcv_data_len >= 65536);
	BUG_ON(cs->data.conn_managed.rcvbuf_consumed >=
			cs->data.conn_managed.rcv_data_len);
	bufleft = cs->data.conn_managed.rcv_data_len -
			cs->data.conn_managed.rcvbuf_consumed;

	BUG_ON(totallen > 1024 * 1024 * 1024);

	if (len > bufleft)
		len = bufleft;

	BUG_ON(cs->data.conn_managed.rcv_buf == 0);

	st_rc = copy_to_iter(cs->data.conn_managed.rcv_buf +
			cs->data.conn_managed.rcvbuf_consumed, len,
			&msg->msg_iter);

	if (unlikely(st_rc != len)) {
		rc = -EFAULT;
		goto out;
	}

	if (likely(peek == 0))
		cs->data.conn_managed.rcvbuf_consumed += (__u16) len;

	rc = len;

out:
	mutex_unlock(&cs->lock);

	return rc;
}
int cor_mngdsocket_recvmsg(struct socket *sock, struct msghdr *msg,
		size_t total_len, int flags)
{
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	int blocking = (flags & MSG_DONTWAIT) == 0;
	int peek = (flags & MSG_PEEK) != 0;

	int rc = 0;
	__u32 totallen;
	__u32 copied = 0;
	__u32 max = (1024 * 1024 * 1024);

	totallen = total_len;
	if (unlikely(totallen > max || total_len > max))
		return -EMSGSIZE;

	if (unlikely(peek != 0) && totallen > 1)
		totallen = 1;

	while (copied < totallen) {
		rc = _cor_mngdsocket_recvmsg(msg, totallen - copied, cs,
				copied == 0, peek);

		if (rc == -EAGAIN && blocking && copied == 0) {
			long waitret;

			waitret = wait_event_interruptible_timeout(
					*sk_sleep(&cs->sk),
					atomic_read(&cs->ready_to_read) != 0,
					cs->sk.sk_rcvtimeo);

			if (unlikely(waitret < 0))
				rc = sock_intr_errno(cs->sk.sk_rcvtimeo);
			else if (unlikely(waitret == 0))
				rc = -ETIMEDOUT;
			else
				continue;
		}

		if (rc <= 0)
			break;

		copied += rc;
	}

	if (unlikely(rc == -EFAULT))
		return -EFAULT;

	BUG_ON(copied > 0 && unlikely((copied > total_len ||
			copied > totallen)));

	if (copied > 0)
		return copied;

	return rc;
}
static unsigned int cor_mngdsocket_poll(struct file *file, struct socket *sock,
		poll_table *wait)
{
	unsigned int mask = 0;

	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	sock_poll_wait(file, sock, wait);

	mutex_lock(&cs->lock);

	if (cs->type == CS_TYPE_UNCONNECTED) {
		mask = 0;
	} else if (cs->type == CS_TYPE_LISTENER) {
		spin_lock_bh(&cor_bindnodes);
		if (unlikely(cs->data.listener.queue_maxlen <= 0))
			mask |= POLLERR;
		else if (list_empty(&cs->data.listener.conn_queue) == 0)
			mask |= (POLLIN | POLLRDNORM);
		spin_unlock_bh(&cor_bindnodes);
	} else if (cs->type == CS_TYPE_CONN_MANAGED) {
		struct cor_conn *src_sock = cs->data.conn_managed.src_sock;

		if (unlikely(unlikely(cs->data.conn_managed.is_reset != 0) ||
				unlikely(cs->data.conn_managed.connect_state ==
				CS_CONNECTSTATE_ERROR))) {
			mask |= (POLLERR | POLLIN | POLLRDNORM |
					POLLOUT | POLLWRNORM);
			goto out;
		}

		if (unlikely(cs->data.conn_managed.connect_state !=
				CS_CONNECTSTATE_CONNECTED))
			goto out;

		if (cor_mngdsocket_readfromconn(cs) != RC_RFC_INCOMPLETE)
			mask |= (POLLIN | POLLRDNORM);

		if (unlikely(src_sock == 0))
			goto out;

		spin_lock_bh(&src_sock->rcv_lock);
		if (unlikely(src_sock->isreset != 0 ||
				cor_is_src_sock(src_sock, cs) == 0)) {
			mask |= POLLERR;
		} else if (cor_sock_sndbufavailable(src_sock, 1) != 0) {
			mask |= (POLLOUT | POLLWRNORM);
		}
		spin_unlock_bh(&src_sock->rcv_lock);
	}

out:
	mutex_unlock(&cs->lock);

	return mask;
}
const struct proto_ops cor_mngd_proto_ops = {
	.family = PF_COR,
	.owner = THIS_MODULE,
	.release = cor_mngdsocket_release,
	.bind = cor_mngdsocket_bind,
	.connect = cor_mngdsocket_connect,
	.accept = cor_mngdsocket_accept,
	.listen = cor_mngdsocket_listen,
	.shutdown = cor_mngdsocket_shutdown,
	.ioctl = cor_mngdsocket_ioctl,
	.setsockopt = cor_mngdsocket_setsockopt,
	.getsockopt = cor_mngdsocket_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cor_mngdsocket_ioctl,
	.compat_setsockopt = cor_mngdsocket_setsockopt,
	.compat_getsockopt = cor_mngdsocket_getsockopt,
#endif
	.sendmsg = cor_mngdsocket_sendmsg,
	.recvmsg = cor_mngdsocket_recvmsg,
	.poll = cor_mngdsocket_poll,
	.socketpair = cor_socket_socketpair,
	.getname = cor_socket_getname,
	.mmap = cor_socket_mmap,
	/* sendpage, splice_read are optional */
};
int cor_create_managed_sock(struct net *net, struct socket *sock, int protocol,
		int kern)
{
	int rc = _cor_createsock(net, sock, protocol, kern, 1);

	if (rc != 0)
		return rc;

	sock->ops = &cor_mngd_proto_ops;

	return 0;
}
/* static void __init test_chksum(void)
{
	char buf[5];

	cor_mngdsocket_chksum("12", 2, "3456789", 7, &buf[0], 4);
	printk(KERN_ERR "test_chksum %hhx %hhx %hhx %hhx %hhx\n", buf[0],
			buf[1], buf[2], buf[3], buf[4]); // 83 92 6 e3
	cor_mngdsocket_chksum("123456789", 9, "", 0, &buf[0], 4);
	printk(KERN_ERR "test_chksum %hhx %hhx %hhx %hhx %hhx\n", buf[0],
			buf[1], buf[2], buf[3], buf[4]); // 83 92 6 e3
} */
int __init cor_sock_managed_init1(void)
{
	memset(&cor_sock_cookie_rb, 0, sizeof(cor_sock_cookie_rb));

	/* test_chksum(); */

	return 0;
}
MODULE_LICENSE("GPL");