/*
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
17 #include <linux/net.h>
18 #include <linux/uaccess.h>
22 static int cor_rawsocket_release_trypasssocket(struct cor_sock
*cs
)
24 struct cor_sock
*passto
;
25 struct cor_conn
*src_sock
;
26 struct cor_conn
*trgt_sock
;
29 mutex_lock(&cs
->lock
);
31 BUG_ON(cs
->type
!= CS_TYPE_CONN_RAW
);
32 passto
= cs
->data
.conn_raw
.pass_on_close
;
33 cs
->data
.conn_raw
.pass_on_close
= 0;
35 src_sock
= cs
->data
.conn_raw
.src_sock
;
36 trgt_sock
= cs
->data
.conn_raw
.trgt_sock
;
38 mutex_unlock(&cs
->lock
);
43 mutex_lock(&passto
->lock
);
44 spin_lock_bh(&src_sock
->rcv_lock
);
45 spin_lock_bh(&trgt_sock
->rcv_lock
);
47 BUG_ON(src_sock
->is_client
== 0);
48 BUG_ON(passto
->type
!= CS_TYPE_CONN_MANAGED
);
50 if (unlikely(unlikely(passto
->isreleased
!= 0) ||
51 unlikely(passto
->data
.conn_managed
.connect_state
!=
52 CS_CONNECTSTATE_CONNECTING
)))
55 BUG_ON(passto
->data
.conn_managed
.src_sock
!= 0);
56 BUG_ON(passto
->data
.conn_managed
.trgt_sock
!= 0);
58 if (unlikely(unlikely(src_sock
->isreset
!= 0) ||
59 unlikely(trgt_sock
->isreset
!= 0))) {
60 __cor_set_sock_connecterror(passto
, ENETUNREACH
);
64 BUG_ON(src_sock
->sourcetype
!= SOURCE_SOCK
);
65 BUG_ON(src_sock
->src
.sock
.ed
->cs
!= cs
);
66 BUG_ON(trgt_sock
->targettype
!= TARGET_SOCK
);
67 BUG_ON(trgt_sock
->trgt
.sock
.cs
!= cs
);
69 passto
->data
.conn_managed
.src_sock
= src_sock
;
70 passto
->data
.conn_managed
.trgt_sock
= trgt_sock
;
71 src_sock
->src
.sock
.ed
->cs
= passto
;
72 trgt_sock
->trgt
.sock
.cs
= passto
;
73 kref_get(&passto
->ref
);
74 kref_get(&passto
->ref
);
76 src_sock
->src
.sock
.ed
->priority
= cs
->priority
;
78 cor_set_conn_is_highlatency(src_sock
, passto
->is_highlatency
, 1, 1);
80 BUG_ON(passto
->data
.conn_managed
.rcv_buf
== 0);
81 src_sock
->src
.sock
.socktype
= SOCKTYPE_MANAGED
;
82 trgt_sock
->trgt
.sock
.socktype
= SOCKTYPE_MANAGED
;
83 trgt_sock
->trgt
.sock
.rcv_buf_state
= RCV_BUF_STATE_INCOMPLETE
;
84 trgt_sock
->trgt
.sock
.rcv_buf
= passto
->data
.conn_managed
.rcv_buf
;
85 trgt_sock
->trgt
.sock
.rcvd
= 0;
87 BUG_ON(src_sock
->src
.sock
.keepalive_intransit
!= 0);
88 src_sock
->src
.sock
.ed
->jiffies_keepalive_lastact
= jiffies
-
89 KEEPALIVE_INTERVAL_SECS
* HZ
+ HZ
;
90 cor_keepalive_req_sched_timer(src_sock
);
92 cor_conn_refresh_priority(src_sock
, 1);
97 spin_unlock_bh(&trgt_sock
->rcv_lock
);
98 spin_unlock_bh(&src_sock
->rcv_lock
);
99 mutex_unlock(&passto
->lock
);
102 mutex_lock(&cs
->lock
);
103 cs
->data
.conn_raw
.src_sock
= 0;
104 cs
->data
.conn_raw
.trgt_sock
= 0;
105 mutex_unlock(&cs
->lock
);
108 cs
->sk
.sk_socket
->state
= SS_CONNECTED
;
109 release_sock(&cs
->sk
);
111 mutex_lock(&passto
->lock
);
112 BUG_ON(passto
->type
!= CS_TYPE_CONN_MANAGED
);
113 passto
->data
.conn_managed
.connect_state
=
114 CS_CONNECTSTATE_CONNECTED
;
115 if (likely(passto
->isreleased
== 0)) {
116 atomic_set(&passto
->ready_to_write
, 1);
118 passto
->sk
.sk_state_change(&passto
->sk
);
120 mutex_unlock(&passto
->lock
);
123 /* pointers from struct cor_conn */
124 kref_put(&cs
->ref
, cor_kreffree_bug
);
125 kref_put(&cs
->ref
, cor_kreffree_bug
);
128 kref_put(&passto
->ref
, cor_free_sock
);
/*
 * proto_ops release handler for raw sockets: returns any partially read
 * buffer item to the conn, then either passes the conn pair on to a managed
 * socket (pass_on_close) or resets it, and finally drops the socket ref.
 *
 * NOTE(review): excerpt truncated — the declaration/assignment of the
 * 'type' local (read under cs->lock), the early-exit after a successful
 * trypasssocket, the kref_put name arguments and the final return are
 * missing; restore from upstream.
 */
int cor_rawsocket_release(struct socket *sock)
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	mutex_lock(&cs->lock);
	/* NOTE(review): 'type' is presumably snapshotted here — confirm. */
	mutex_unlock(&cs->lock);

	if (type == CS_TYPE_UNCONNECTED) {
		/* Nothing to tear down for an unconnected socket. */
	} else if (type == CS_TYPE_CONN_RAW) {
		mutex_lock(&cs->lock);
		BUG_ON(cs->type != CS_TYPE_CONN_RAW);
		/* Return a half-consumed receive item to the data buf. */
		if (cs->data.conn_raw.rcvitem != 0) {
			BUG_ON(cs->data.conn_raw.trgt_sock == 0);
			cor_databuf_unpull_dpi(cs->data.conn_raw.trgt_sock, cs,
					cs->data.conn_raw.rcvitem,
					cs->data.conn_raw.rcvoffset);
			cs->data.conn_raw.rcvitem = 0;
		mutex_unlock(&cs->lock);

		/* If the conn was handed over, skip the reset below. */
		if (cor_rawsocket_release_trypasssocket(cs) != 0)
		/* NOTE(review): branch body missing from excerpt. */

		mutex_lock(&cs->lock);
		BUG_ON(cs->type != CS_TYPE_CONN_RAW);
		if (cs->data.conn_raw.src_sock != 0 &&
				cs->data.conn_raw.trgt_sock != 0) {
			/* Not passed on: reset the conn and drop both refs. */
			cor_reset_conn(cs->data.conn_raw.src_sock);
			cor_conn_kref_put_bug(cs->data.conn_raw.src_sock,
			cor_conn_kref_put(cs->data.conn_raw.trgt_sock,
			cs->data.conn_raw.src_sock = 0;
			cs->data.conn_raw.trgt_sock = 0;
		mutex_unlock(&cs->lock);

	/* Drop the socket's own reference. */
	kref_put(&cs->ref, cor_free_sock);
/* proto_ops bind handler. NOTE(review): body missing from this excerpt. */
int cor_rawsocket_bind(struct socket *sock, struct sockaddr *saddr,
/*
 * proto_ops connect handler: allocates a bidirectional conn, initializes
 * its source (client) and target (server) halves as socket endpoints, links
 * them to this cor_sock and marks the socket SS_CONNECTED.
 *
 * NOTE(review): excerpt truncated — opening brace, the return statements of
 * the error paths (type mismatch, alloc failure, init_sock_source failure)
 * and the final return are missing; restore from upstream.
 */
int cor_rawsocket_connect(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len, int flags)
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	struct cor_conn_bidir *cnb;
	struct cor_conn *src_sock;
	struct cor_conn *trgt_sock;

	mutex_lock(&cs->lock);
	/* Only an unconnected socket may connect. */
	if (cs->type != CS_TYPE_UNCONNECTED) {
		mutex_unlock(&cs->lock);

	cnb = cor_alloc_conn(GFP_KERNEL, cs->is_highlatency);
	if (unlikely(cnb == 0)) {
		mutex_unlock(&cs->lock);

	/* Client half sends, server half receives, from our viewpoint. */
	src_sock = &cnb->cli;
	trgt_sock = &cnb->srv;

	spin_lock_bh(&src_sock->rcv_lock);
	spin_lock_bh(&trgt_sock->rcv_lock);

	if (cor_conn_init_sock_source(src_sock) != 0) {
		spin_unlock_bh(&trgt_sock->rcv_lock);
		spin_unlock_bh(&src_sock->rcv_lock);
		mutex_unlock(&cs->lock);
		cor_reset_conn(src_sock);
		cor_conn_kref_put(src_sock, "alloc_conn");

	cor_conn_init_sock_target(trgt_sock);

	/* Switch the socket to raw-conn mode and cross-link socket<->conn;
	 * one conn ref is held per stored pointer. */
	memset(&cs->data, 0, sizeof(cs->data));
	cs->type = CS_TYPE_CONN_RAW;
	cs->data.conn_raw.src_sock = src_sock;
	cs->data.conn_raw.trgt_sock = trgt_sock;
	cor_conn_kref_get(src_sock, "socket");
	cor_conn_kref_get(trgt_sock, "socket");

	src_sock->src.sock.ed->cs = cs;
	trgt_sock->trgt.sock.cs = cs;

	src_sock->src.sock.socktype = SOCKTYPE_RAW;
	trgt_sock->trgt.sock.socktype = SOCKTYPE_RAW;
	src_sock->src.sock.ed->priority = cs->priority;

	cor_conn_refresh_priority(src_sock, 1);

	spin_unlock_bh(&trgt_sock->rcv_lock);
	spin_unlock_bh(&src_sock->rcv_lock);
	mutex_unlock(&cs->lock);

	sock->state = SS_CONNECTED;
	release_sock(&cs->sk);

	/* Drop the allocation reference; the "socket" refs keep it alive. */
	cor_conn_kref_put(src_sock, "alloc_conn");
/* proto_ops accept handler. NOTE(review): body missing from this excerpt. */
int cor_rawsocket_accept(struct socket *sock, struct socket *newsock, int flags,
/* proto_ops listen handler. NOTE(review): body missing from this excerpt. */
int cor_rawsocket_listen(struct socket *sock, int len)
/* proto_ops shutdown handler. NOTE(review): body missing from this excerpt. */
int cor_rawsocket_shutdown(struct socket *sock, int flags)
/* proto_ops ioctl handler. NOTE(review): body missing from this excerpt. */
int cor_rawsocket_ioctl(struct socket *sock, unsigned int cmd,
/*
 * COR_PASS_ON_CLOSE setsockopt: reads an 8-byte cookie from userspace,
 * looks up the managed socket it identifies and registers it so that
 * cor_rawsocket_release() can hand the conn over on close. The reference
 * taken by cor_get_sock_by_cookie() is kept while registered.
 *
 * NOTE(review): excerpt truncated — declarations of 'cookie', 'notread'
 * and 'rc', the error returns (EINVAL/EFAULT/type mismatch) and the final
 * return are missing; restore from upstream.
 */
static int cor_rawsocket_setsockopt_passonclose(struct socket *sock,
		char __user *optval, unsigned int optlen)
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	struct cor_sock *passto;

	/* The option payload is a fixed 8-byte cookie. */
	if (unlikely(optlen != 8))

	notread = copy_from_user((char *) &cookie, optval, 8);
	if (unlikely(notread != 0))

	/* Lookup takes a reference on success. */
	passto = cor_get_sock_by_cookie(cookie);
	if (unlikely(passto == 0))

	mutex_lock(&cs->lock);
	if (unlikely(cs->type != CS_TYPE_CONN_RAW)) {

	BUG_ON(passto->type != CS_TYPE_CONN_MANAGED);

	/* Replace any previously registered target, dropping its ref. */
	if (unlikely(cs->data.conn_raw.pass_on_close != 0))
		kref_put(&cs->data.conn_raw.pass_on_close->ref, cor_free_sock);

	cs->data.conn_raw.pass_on_close = passto;

	mutex_unlock(&cs->lock);

	/* On failure the lookup reference must be released. */
	if (unlikely(rc != 0))
		kref_put(&passto->ref, cor_free_sock);
/*
 * proto_ops setsockopt dispatcher for SOL_COR options: PASS_ON_CLOSE, TOS
 * and PRIORITY. NOTE(review): excerpt truncated — the non-SOL_COR return,
 * the trailing 'optlen' argument of the passonclose call and the default
 * (unknown option) return are missing; restore from upstream.
 */
int cor_rawsocket_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen)
	/* Only SOL_COR options are handled here. */
	if (unlikely(level != SOL_COR))

	if (optname == COR_PASS_ON_CLOSE) {
		return cor_rawsocket_setsockopt_passonclose(sock, optval,
	} else if (optname == COR_TOS) {
		return cor_socket_setsockopt_tos(sock, optval, optlen);
	} else if (optname == COR_PRIORITY) {
		return cor_socket_setsockopt_priority(sock, optval, optlen);
/* proto_ops getsockopt handler. NOTE(review): body missing from excerpt. */
int cor_rawsocket_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen)
/*
 * Poll worker: computes the readiness mask for a raw socket. Readable when
 * a receive item is pending or the target conn has buffered data; writable
 * when the source conn has send-buffer space.
 *
 * NOTE(review): excerpt truncated — the trailing parameter(s) after
 * 'writelen', the returns of the early-exit branches, the error-mask
 * assignments in the isreset branches and the final 'return mask' are
 * missing. The two adjacent BUG_ONs below contradict each other (the first
 * forbids what the second allows); the first likely sat inside a dropped
 * conditional — confirm against upstream.
 */
static unsigned int _cor_rawsocket_poll(struct cor_sock *cs, __u32 writelen,
	unsigned int mask = 0;

	struct cor_conn *trgt_sock;
	struct cor_conn *src_sock;

	mutex_lock(&cs->lock);
	BUG_ON(cs->type != CS_TYPE_CONN_RAW);
	BUG_ON(cs->type != CS_TYPE_UNCONNECTED &&
			cs->type != CS_TYPE_CONN_RAW);
	if (unlikely(cs->type != CS_TYPE_CONN_RAW)) {
		mutex_unlock(&cs->lock);

	trgt_sock = cs->data.conn_raw.trgt_sock;
	src_sock = cs->data.conn_raw.src_sock;

	if (unlikely(trgt_sock == 0 || src_sock == 0)) {
		mutex_unlock(&cs->lock);

	/* Readable: pending rcvitem or data buffered in the target conn. */
	spin_lock_bh(&trgt_sock->rcv_lock);
	if (unlikely(trgt_sock->isreset != 0 ||
			cor_is_trgt_sock(trgt_sock, cs) == 0)) {
	} else if (cs->data.conn_raw.rcvitem != 0 ||
			trgt_sock->data_buf.read_remaining != 0) {
		mask |= (POLLIN | POLLRDNORM);
	spin_unlock_bh(&trgt_sock->rcv_lock);

	/* Writable: send-buffer space available in the source conn. */
	spin_lock_bh(&src_sock->rcv_lock);
	if (unlikely(src_sock->isreset != 0 ||
			cor_is_src_sock(src_sock, cs) == 0)) {
	} else if (cor_sock_sndbufavailable(src_sock, 1)) {
		mask |= (POLLOUT | POLLWRNORM);
	spin_unlock_bh(&src_sock->rcv_lock);

	mutex_unlock(&cs->lock);
/*
 * Innermost send step (called with cs->lock held, per the _r_l suffix
 * convention used for its parameter): pushes 'bufread' bytes from 'buf'
 * into the source conn via cor_receive_sock() and flushes.
 *
 * NOTE(review): excerpt truncated — declaration of 'rc2', the returns of
 * the early-exit branches (no src_sock, reset conn, no buffer space,
 * rc2 == 0) and the final return are missing; restore from upstream.
 */
static int ___cor_rawsocket_sendmsg(char *buf, __u32 bufread,
		__u32 buflen, __u8 flush, struct cor_sock *cs_r_l)
	struct cor_conn *src_sock;

	BUG_ON(cs_r_l->type != CS_TYPE_CONN_RAW);

	src_sock = cs_r_l->data.conn_raw.src_sock;
	if (unlikely(src_sock == 0))

	spin_lock_bh(&src_sock->rcv_lock);

	if (unlikely(unlikely(cor_is_src_sock(src_sock, cs_r_l) == 0) ||
			unlikely(src_sock->isreset != 0))) {
		spin_unlock_bh(&src_sock->rcv_lock);

	if (cor_sock_sndbufavailable(src_sock, 0) == 0) {
		/* No space: clear ready_to_write so the caller can block on
		 * it until the sender is woken. */
		atomic_set(&cs_r_l->ready_to_write, 0);

	BUG_ON(bufread > (1024 * 1024 * 1024));
	BUG_ON(buflen > (1024 * 1024 * 1024));

	rc2 = cor_receive_sock(src_sock, buf, bufread, flush);

	BUG_ON(rc2 > (1024 * 1024 * 1024));
	if (unlikely(rc2 == 0))

	cor_flush_buf(src_sock);

	spin_unlock_bh(&src_sock->rcv_lock);
/*
 * Copies up to one buffer's worth of user data out of the msghdr iterator
 * into a kernel buffer and forwards it to ___cor_rawsocket_sendmsg().
 *
 * NOTE(review): excerpt truncated — declarations of 'buf', 'bufread',
 * 'st_rc' and 'rc', the clamping of 'len' to 'buflen', the error returns
 * (zero length, ENOMEM, EFAULT with kfree) and the final kfree/return are
 * missing. Also 'len <= 0' on an unsigned __u32 is equivalent to
 * 'len == 0' — flag upstream.
 */
static int __cor_rawsocket_sendmsg(struct msghdr *msg, __u32 totallen,
		__u8 flush, struct cor_sock *cs_r_l)
	__u32 buflen = cor_buf_optlen(totallen, 1);
	__u32 len = totallen;

	BUG_ON(totallen > (1024 * 1024 * 1024));
	BUG_ON(buflen > (1024 * 1024 * 1024));

	if (unlikely(len <= 0))

	buf = kmalloc(buflen, GFP_KERNEL);
	if (unlikely(buf == 0))

	memset(buf, 0, buflen);

	/* Pull 'len' bytes from the user iovec into the staging buffer. */
	st_rc = copy_from_iter(buf + bufread, len, &msg->msg_iter);

	if (unlikely(st_rc != len)) {

	rc = ___cor_rawsocket_sendmsg(buf, len, buflen, flush, cs_r_l);
/*
 * Takes cs->lock, validates the socket type and performs one send attempt.
 *
 * NOTE(review): excerpt truncated — declaration of 'copied', the error
 * returns of the type-check branches (ENOTCONN/EINVAL or similar) and the
 * final return are missing; restore from upstream.
 */
static int _cor_rawsocket_sendmsg(struct msghdr *msg, __u32 totallen,
		struct cor_sock *cs, __u8 flush)
	BUG_ON(totallen > (1024 * 1024 * 1024));

	mutex_lock(&cs->lock);

	BUG_ON(cs->type != CS_TYPE_UNCONNECTED && cs->type != CS_TYPE_CONN_RAW);
	if (unlikely(cs->type == CS_TYPE_UNCONNECTED)) {
		mutex_unlock(&cs->lock);
	} else if (unlikely(cs->type != CS_TYPE_CONN_RAW)) {
		mutex_unlock(&cs->lock);

	copied = __cor_rawsocket_sendmsg(msg, totallen, flush, cs);
	BUG_ON(copied > 0 && ((__u32) copied) > totallen);

	mutex_unlock(&cs->lock);
/*
 * proto_ops sendmsg handler: loops over _cor_rawsocket_sendmsg() until all
 * of total_len is copied, blocking (when MSG_DONTWAIT is unset) on
 * ready_to_write with the socket's send timeout. MSG_MORE suppresses the
 * flush flag.
 *
 * NOTE(review): excerpt truncated — the 'size_t total_len' parameter line,
 * declarations of 'rc'/'copied'/'totallen'/'waitret', the wait-queue and
 * timeout arguments of wait_event_interruptible_timeout, the EAGAIN return
 * on timeout, the copied-accumulation and the final return are missing;
 * restore from upstream.
 */
int cor_rawsocket_sendmsg(struct socket *sock, struct msghdr *msg,
	__u8 flush = ((msg->msg_flags & MSG_MORE) == 0) ? 1 : 0;
	int blocking = (msg->msg_flags & MSG_DONTWAIT) == 0;

	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	__u32 max = (1024 * 1024 * 1024);

	/* Clamp: both the truncated __u32 and the original size_t must fit. */
	totallen = total_len;
	if (unlikely(totallen > max || total_len > max)) {

	while (rc >= 0 && copied < totallen) {
		rc = _cor_rawsocket_sendmsg(msg, totallen, cs, flush);

		BUG_ON(rc > 0 && unlikely((rc > total_len || rc > totallen)));

		if (rc == -EAGAIN && blocking && copied == 0) {
			/* Block until the sender is woken or timeout. */
			waitret = wait_event_interruptible_timeout(
					atomic_read(&cs->ready_to_write) != 0,

			if (unlikely(waitret < 0))
				rc = sock_intr_errno(cs->sk.sk_sndtimeo);
			else if (unlikely(waitret == 0))

		if (rc > 0 || copied == 0)

		if (unlikely(rc == -EFAULT))

	BUG_ON(copied > 0 && ((__u32) copied > totallen));
/*
 * Copies data from the currently held receive item (rcvitem) into the
 * user iovec, advancing rcvoffset and freeing the item once fully consumed.
 *
 * NOTE(review): excerpt truncated — the trailing 'struct cor_sock *cs'
 * parameter, declarations of 'len'/'st_rc'/'written', the initialization
 * of 'len' from totallen, the iterator argument of copy_to_iter, the
 * EFAULT return and the final return are missing; restore from upstream.
 */
static int __cor_rawsocket_recvmsg(struct msghdr *msg, __u32 totallen,
	struct cor_data_buf_item *dbi = cs->data.conn_raw.rcvitem;

	BUG_ON(totallen > (1024 * 1024 * 1024));

	/* There must be unconsumed bytes left in the item. */
	BUG_ON(dbi->datalen <= cs->data.conn_raw.rcvoffset);

	/* Clamp the copy to what remains in this item. */
	if (len > (dbi->datalen - cs->data.conn_raw.rcvoffset))
		len = dbi->datalen - cs->data.conn_raw.rcvoffset;

	if (unlikely(len <= 0))

	st_rc = copy_to_iter(dbi->buf + cs->data.conn_raw.rcvoffset, len,

	if (unlikely(st_rc != len))

	cs->data.conn_raw.rcvoffset += len;
	/* Item fully consumed: free it and reset the read position. */
	if (dbi->datalen == cs->data.conn_raw.rcvoffset) {
		cor_databuf_item_free(cs->data.conn_raw.rcvitem);
		cs->data.conn_raw.rcvitem = 0;
		cs->data.conn_raw.rcvoffset = 0;

	BUG_ON(written > totallen);
/*
 * Receive loop: pulls buffer items from the target conn and copies them to
 * userspace via __cor_rawsocket_recvmsg() until totallen is satisfied or
 * an error/short read occurs; wakes the remote sender if anything was read.
 *
 * NOTE(review): excerpt truncated — declarations of 'rc' and 'copied', the
 * error returns (ENOTCONN/EPIPE or similar), the copied-accumulation and
 * the final return are missing. Also note the bitwise '|' in the
 * reset-check below where the sendmsg counterpart uses logical '||' —
 * harmless only because both operands are 0/1; should be '||' upstream.
 */
static int _cor_rawsocket_recvmsg(struct msghdr *msg, __u32 totallen,
		struct cor_sock *cs_r)
	struct cor_conn *trgt_sock;

	mutex_lock(&cs_r->lock);

	BUG_ON(cs_r->type != CS_TYPE_CONN_RAW);

	trgt_sock = cs_r->data.conn_raw.trgt_sock;

	if (unlikely(cs_r->data.conn_raw.src_sock == 0 || trgt_sock == 0)) {
		mutex_unlock(&cs_r->lock);

	/* Keep the conn alive across the lock-dropping copy below. */
	cor_conn_kref_get(trgt_sock, "stack");

	while (rc >= 0 && copied < totallen) {
		/* Skip the pull if an item is already pending. */
		if (cs_r->data.conn_raw.rcvitem != 0)

		spin_lock_bh(&trgt_sock->rcv_lock);
		if (unlikely(unlikely(cor_is_trgt_sock(trgt_sock, cs_r) == 0) |
				unlikely(trgt_sock->isreset != 0))) {
			spin_unlock_bh(&trgt_sock->rcv_lock);
			cor_conn_kref_put(trgt_sock, "stack");
			mutex_unlock(&cs_r->lock);

		cor_databuf_pull_dbi(cs_r, trgt_sock);
		/* Nothing pulled: the socket is no longer readable. */
		if (cs_r->data.conn_raw.rcvitem == 0)
			atomic_set(&cs_r->ready_to_read, 0);

		cor_bufsize_read_to_sock(trgt_sock);

		spin_unlock_bh(&trgt_sock->rcv_lock);

		rc = __cor_rawsocket_recvmsg(msg, totallen - copied, cs_r);

		if (rc > 0 || copied == 0)

		if (unlikely(rc == -EFAULT))

	BUG_ON(copied > 0 && ((__u32) copied > totallen));

	mutex_unlock(&cs_r->lock);

	/* Freed buffer space: let the remote side send more. */
	if (likely(copied > 0))
		cor_wake_sender(trgt_sock);

	cor_conn_kref_put(trgt_sock, "stack");
/*
 * proto_ops recvmsg handler: validates flags and socket type, performs one
 * receive pass and, when blocking, waits on ready_to_read before retrying.
 * MSG_PEEK is not supported.
 *
 * NOTE(review): excerpt truncated — declarations of 'rc' and 'totallen',
 * the error returns (EMSGSIZE/EOPNOTSUPP/ENOTCONN or similar), the retry
 * after the interruptible wait and the final return are missing; restore
 * from upstream.
 */
int cor_rawsocket_recvmsg(struct socket *sock, struct msghdr *msg,
		size_t total_len, int flags)
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	int blocking = (flags & MSG_DONTWAIT) == 0;

	__u32 max = (1024 * 1024 * 1024);

	totallen = total_len;
	if (unlikely(totallen > max || total_len > max))

	/* Peeking is not supported on raw sockets. */
	if (unlikely((flags & MSG_PEEK) != 0))

	mutex_lock(&cs->lock);
	BUG_ON(cs->type != CS_TYPE_UNCONNECTED && cs->type != CS_TYPE_CONN_RAW);
	if (unlikely(cs->type == CS_TYPE_UNCONNECTED)) {
		mutex_unlock(&cs->lock);
	} else if (unlikely(cs->type != CS_TYPE_CONN_RAW)) {
		mutex_unlock(&cs->lock);
	mutex_unlock(&cs->lock);

	rc = _cor_rawsocket_recvmsg(msg, totallen, cs);

	BUG_ON(rc > 0 && unlikely((rc > total_len || rc > totallen)));

	/* Block until data is signalled, then (presumably) retry. */
	if (rc == -EAGAIN && blocking) {
		if (wait_event_interruptible(*sk_sleep(&cs->sk),
				atomic_read(&cs->ready_to_read) != 0) == 0)
/*
 * proto_ops poll handler: registers on the socket wait queue, then
 * delegates mask computation to _cor_rawsocket_poll().
 * NOTE(review): the trailing poll_table parameter line ('wait') is missing
 * from this excerpt.
 */
static unsigned int cor_rawsocket_poll(struct file *file, struct socket *sock,
	struct cor_sock *cs = (struct cor_sock *) sock->sk;

	sock_poll_wait(file, sock, wait);
	return _cor_rawsocket_poll(cs, U32_MAX, 1);
728 const struct proto_ops cor_raw_proto_ops
= {
730 .owner
= THIS_MODULE
,
731 .release
= cor_rawsocket_release
,
732 .bind
= cor_rawsocket_bind
,
733 .connect
= cor_rawsocket_connect
,
734 .accept
= cor_rawsocket_accept
,
735 .listen
= cor_rawsocket_listen
,
736 .shutdown
= cor_rawsocket_shutdown
,
737 .ioctl
= cor_rawsocket_ioctl
,
738 .setsockopt
= cor_rawsocket_setsockopt
,
739 .getsockopt
= cor_rawsocket_getsockopt
,
741 .combat_ioctl
= cor_rawsocket_ioctl
,
742 .compat_setsockopt
= cor_rawsocket_setsockopt
,
743 .compat_getsockopt
= cor_rawsocket_getsockopt
,
745 .sendmsg
= cor_rawsocket_sendmsg
,
746 .recvmsg
= cor_rawsocket_recvmsg
,
747 .poll
= cor_rawsocket_poll
,
748 .socketpair
= cor_socket_socketpair
,
749 .getname
= cor_socket_getname
,
750 .mmap
= cor_socket_mmap
,
752 /* sendpage, splice_read, are optional */
/*
 * Protocol-family create hook for raw sockets: performs the common socket
 * setup via _cor_createsock() and installs the raw proto_ops table.
 * NOTE(review): excerpt truncated — the 'int kern' parameter line, the
 * rc error check and the final return are missing; restore from upstream.
 */
int cor_create_raw_sock(struct net *net, struct socket *sock, int protocol,
	int rc = _cor_createsock(net, sock, protocol, kern, 1);

	sock->ops = &cor_raw_proto_ops;
768 MODULE_LICENSE("GPL");