2 * Connection oriented routing
3 * Copyright (C) 2007-2021 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 #include <linux/net.h>
23 #include <asm/uaccess.h>
/* cor_rawsocket_release_trypasssocket() - on release of a raw socket, try to
 * hand its connection pair (src_sock/trgt_sock) over to the managed socket
 * previously registered via the COR_PASS_ON_CLOSE sockopt (pass_on_close).
 *
 * NOTE(review): extraction-garbled text below — each statement is split across
 * lines and prefixed with its original line number; lines missing from this
 * view (gaps in the numbering: returns, braces, unlock/error paths) must be
 * restored from the original file before this can compile.
 */
27 static int cor_rawsocket_release_trypasssocket(struct cor_sock
*cs
)
29 struct cor_sock
*passto
;
30 struct cor_conn
*src_sock
;
31 struct cor_conn
*trgt_sock
;
/* Take the pass_on_close target and the conn pair out of cs under cs->lock. */
34 mutex_lock(&(cs
->lock
));
36 BUG_ON(cs
->type
!= CS_TYPE_CONN_RAW
);
37 passto
= cs
->data
.conn_raw
.pass_on_close
;
= cs
->data
.conn_raw
.pass_on_close
;
38 cs
->data
.conn_raw
.pass_on_close
= 0;
40 src_sock
= cs
->data
.conn_raw
.src_sock
;
41 trgt_sock
= cs
->data
.conn_raw
.trgt_sock
;
43 mutex_unlock(&(cs
->lock
));
/* Lock order: passto->lock (mutex), then both conn rcv_locks (bh spinlocks). */
48 mutex_lock(&(passto
->lock
));
49 spin_lock_bh(&(src_sock
->rcv_lock
));
50 spin_lock_bh(&(trgt_sock
->rcv_lock
));
52 BUG_ON(src_sock
->is_client
== 0);
53 BUG_ON(passto
->type
!= CS_TYPE_CONN_MANAGED
);
/* Bail out if the managed socket was released or is no longer connecting
 * (missing body of this if — presumably unlock + error return; TODO confirm). */
55 if (unlikely(unlikely(passto
->isreleased
!= 0) ||
56 unlikely(passto
->data
.conn_managed
.connect_state
!=
57 CS_CONNECTSTATE_CONNECTING
)))
60 BUG_ON(passto
->data
.conn_managed
.src_sock
!= 0);
61 BUG_ON(passto
->data
.conn_managed
.trgt_sock
!= 0);
/* If either conn already reset, report -ENETUNREACH to the managed socket. */
63 if (unlikely(unlikely(src_sock
->isreset
!= 0) ||
64 unlikely(trgt_sock
->isreset
!= 0))) {
65 __cor_set_sock_connecterror(passto
, -ENETUNREACH
);
69 BUG_ON(src_sock
->sourcetype
!= SOURCE_SOCK
);
70 BUG_ON(src_sock
->source
.sock
.cs
!= cs
);
71 BUG_ON(trgt_sock
->targettype
!= TARGET_SOCK
);
72 BUG_ON(trgt_sock
->target
.sock
.cs
!= cs
);
/* Re-point both conns at the managed socket; two kref_get calls match the
 * two back-pointers (src_sock->...cs and trgt_sock->...cs) now holding passto. */
74 passto
->data
.conn_managed
.src_sock
= src_sock
;
75 passto
->data
.conn_managed
.trgt_sock
= trgt_sock
;
76 src_sock
->source
.sock
.cs
= passto
;
77 trgt_sock
->target
.sock
.cs
= passto
;
78 kref_get(&(passto
->ref
));
79 kref_get(&(passto
->ref
));
81 src_sock
->source
.sock
.priority
= cs
->priority
;
82 BUG_ON(passto
->data
.conn_managed
.rcv_buf
== 0);
83 trgt_sock
->target
.sock
.socktype
= SOCKTYPE_MANAGED
;
84 trgt_sock
->target
.sock
.rcv_buf_state
= RCV_BUF_STATE_INCOMPLETE
;
85 trgt_sock
->target
.sock
.rcv_buf
= passto
->data
.conn_managed
.rcv_buf
;
86 trgt_sock
->target
.sock
.rcvd
= 0;
88 cor_conn_refresh_priority(src_sock
, 1);
93 spin_unlock_bh(&(trgt_sock
->rcv_lock
));
94 spin_unlock_bh(&(src_sock
->rcv_lock
));
95 mutex_unlock(&(passto
->lock
));
/* Detach the conn pair from the raw socket. */
98 mutex_lock(&(cs
->lock
));
99 cs
->data
.conn_raw
.src_sock
= 0;
100 cs
->data
.conn_raw
.trgt_sock
= 0;
101 mutex_unlock(&(cs
->lock
));
103 lock_sock(&(cs
->sk
));
104 cs
->sk
.sk_socket
->state
= SS_CONNECTED
;
105 release_sock(&(cs
->sk
));
/* Mark the managed socket connected and wake writers via sk_state_change. */
107 mutex_lock(&(passto
->lock
));
108 BUG_ON(passto
->type
!= CS_TYPE_CONN_MANAGED
);
109 passto
->data
.conn_managed
.connect_state
=
110 CS_CONNECTSTATE_CONNECTED
;
111 if (likely(passto
->isreleased
== 0)) {
112 atomic_set(&(passto
->ready_to_write
), 1);
114 passto
->sk
.sk_state_change(&(passto
->sk
));
116 mutex_unlock(&(passto
->lock
));
/* Drop the two refs cs held via the conn back-pointers, then the lookup ref
 * on passto taken by the setsockopt path. */
119 /* pointers from struct cor_conn */
120 kref_put(&(cs
->ref
), cor_kreffree_bug
);
121 kref_put(&(cs
->ref
), cor_kreffree_bug
);
124 kref_put(&(passto
->ref
), cor_free_sock
);
/* cor_rawsocket_release() - proto_ops .release for raw COR sockets.
 * Returns any pulled-but-unread receive item to the conn data buffer, tries
 * to pass the conn pair to a registered managed socket, otherwise resets the
 * connection and drops the socket's references.
 *
 * NOTE(review): extraction-garbled; missing lines (e.g. the read of "type"
 * under cs->lock, closing braces, return) must come from the original file.
 */
129 int cor_rawsocket_release(struct socket
*sock
)
131 struct cor_sock
*cs
= (struct cor_sock
*) sock
->sk
;
134 mutex_lock(&(cs
->lock
));
137 mutex_unlock(&(cs
->lock
));
139 if (type
== CS_TYPE_UNCONNECTED
) {
140 } else if (type
== CS_TYPE_CONN_RAW
) {
141 mutex_lock(&(cs
->lock
));
142 BUG_ON(cs
->type
!= CS_TYPE_CONN_RAW
);
/* Undo a partially consumed receive item back into the target conn. */
143 if (cs
->data
.conn_raw
.rcvitem
!= 0) {
144 BUG_ON(cs
->data
.conn_raw
.trgt_sock
== 0);
146 cor_databuf_unpull_dpi(cs
->data
.conn_raw
.trgt_sock
, cs
,
147 cs
->data
.conn_raw
.rcvitem
,
148 cs
->data
.conn_raw
.rcvoffset
);
149 cs
->data
.conn_raw
.rcvitem
= 0;
151 mutex_unlock(&(cs
->lock
));
/* If the conn pair could be handed over, skip the reset path below. */
153 if (cor_rawsocket_release_trypasssocket(cs
) != 0)
156 mutex_lock(&(cs
->lock
));
157 BUG_ON(cs
->type
!= CS_TYPE_CONN_RAW
);
158 if (cs
->data
.conn_raw
.src_sock
!= 0 &&
159 cs
->data
.conn_raw
.trgt_sock
!= 0) {
160 cor_reset_conn(cs
->data
.conn_raw
.src_sock
);
161 cor_conn_kref_put_bug(cs
->data
.conn_raw
.src_sock
,
163 cor_conn_kref_put(cs
->data
.conn_raw
.trgt_sock
,
165 cs
->data
.conn_raw
.src_sock
= 0;
166 cs
->data
.conn_raw
.trgt_sock
= 0;
168 mutex_unlock(&(cs
->lock
));
/* Drop the socket's own reference; cor_free_sock runs on last put. */
174 kref_put(&(cs
->ref
), cor_free_sock
);
/* cor_rawsocket_bind() - proto_ops .bind; signature fragment only, body lines
 * missing from this extraction. */
179 int cor_rawsocket_bind(struct socket
*sock
, struct sockaddr
*saddr
,
/* cor_rawsocket_connect() - proto_ops .connect for raw COR sockets.
 * Allocates a bidirectional conn pair (client/server halves), attaches it to
 * the socket as CS_TYPE_CONN_RAW and marks the socket SS_CONNECTED. saddr is
 * not inspected in the visible lines.
 *
 * NOTE(review): extraction-garbled; missing lines (error returns, closing
 * braces) must come from the original file.
 */
185 int cor_rawsocket_connect(struct socket
*sock
, struct sockaddr
*saddr
,
186 int sockaddr_len
, int flags
)
188 struct cor_sock
*cs
= (struct cor_sock
*) sock
->sk
;
190 struct cor_conn_bidir
*cnb
;
191 struct cor_conn
*src_sock
;
192 struct cor_conn
*trgt_sock
;
194 cnb
= cor_alloc_conn(GFP_KERNEL
, cs
->is_highlatency
);
196 if (unlikely(cnb
== 0))
199 src_sock
= &(cnb
->cli
);
200 trgt_sock
= &(cnb
->srv
);
202 mutex_lock(&(cs
->lock
));
203 spin_lock_bh(&(src_sock
->rcv_lock
));
204 spin_lock_bh(&(trgt_sock
->rcv_lock
));
/* Already connected (or otherwise typed): back out and free the new conn. */
205 if (cs
->type
!= CS_TYPE_UNCONNECTED
) {
206 spin_unlock_bh(&(trgt_sock
->rcv_lock
));
207 spin_unlock_bh(&(src_sock
->rcv_lock
));
208 mutex_unlock(&(cs
->lock
));
209 cor_reset_conn(src_sock
);
210 cor_conn_kref_put(src_sock
, "alloc_conn");
214 cor_conn_init_sock_source(src_sock
);
215 cor_conn_init_sock_target(trgt_sock
);
/* Wire the conn pair and the socket to each other; the two kref_get calls
 * match the two cs back-pointers stored in the conns. */
217 memset(&(cs
->data
), 0, sizeof(cs
->data
));
218 cs
->type
= CS_TYPE_CONN_RAW
;
219 cs
->data
.conn_raw
.src_sock
= src_sock
;
220 cs
->data
.conn_raw
.trgt_sock
= trgt_sock
;
221 cor_conn_kref_get(src_sock
, "socket");
222 cor_conn_kref_get(trgt_sock
, "socket");
224 src_sock
->source
.sock
.cs
= cs
;
225 trgt_sock
->target
.sock
.cs
= cs
;
226 kref_get(&(cs
->ref
));
227 kref_get(&(cs
->ref
));
229 trgt_sock
->target
.sock
.socktype
= SOCKTYPE_RAW
;
230 src_sock
->source
.sock
.priority
= cs
->priority
;
232 cor_conn_refresh_priority(src_sock
, 1);
234 spin_unlock_bh(&(trgt_sock
->rcv_lock
));
235 spin_unlock_bh(&(src_sock
->rcv_lock
));
236 mutex_unlock(&(cs
->lock
));
238 lock_sock(&(cs
->sk
));
239 sock
->state
= SS_CONNECTED
;
240 release_sock(&(cs
->sk
));
/* Drop the allocation reference; the "socket" refs taken above keep it alive. */
242 cor_conn_kref_put(src_sock
, "alloc_conn");
/* Signature fragments for proto_ops .accept/.listen/.shutdown/.ioctl; the
 * bodies are missing from this extraction. */
247 int cor_rawsocket_accept(struct socket
*sock
, struct socket
*newsock
, int flags
,
253 int cor_rawsocket_listen(struct socket
*sock
, int len
)
258 int cor_rawsocket_shutdown(struct socket
*sock
, int flags
)
263 int cor_rawsocket_ioctl(struct socket
*sock
, unsigned int cmd
,
/* cor_rawsocket_setsockopt_passonclose() - handle COR_PASS_ON_CLOSE.
 * Copies an 8-byte cookie from userspace, looks up the managed socket it
 * identifies and stores it in cs->data.conn_raw.pass_on_close, to be used by
 * cor_rawsocket_release_trypasssocket() when this raw socket is closed.
 *
 * NOTE(review): extraction-garbled; declarations of rc/notread/cookie and the
 * error-return lines are among the missing lines.
 */
269 static int cor_rawsocket_setsockopt_passonclose(struct socket
*sock
,
270 char __user
*optval
, unsigned int optlen
)
272 struct cor_sock
*cs
= (struct cor_sock
*) sock
->sk
;
278 struct cor_sock
*passto
;
/* The cookie is fixed-size: exactly 8 bytes. */
280 if (unlikely(optlen
!= 8))
283 notread
= copy_from_user((char *) &cookie
, optval
, 8);
284 if (unlikely(notread
!= 0))
/* cor_get_sock_by_cookie() presumably returns a referenced socket — the
 * kref_put on the error path below suggests so; TODO confirm. */
287 passto
= cor_get_sock_by_cookie(cookie
);
288 if (unlikely(passto
== 0))
291 mutex_lock(&(cs
->lock
));
292 if (unlikely(cs
->type
!= CS_TYPE_CONN_RAW
)) {
297 BUG_ON(passto
->type
!= CS_TYPE_CONN_MANAGED
);
/* Replace any previously registered pass_on_close target, dropping its ref. */
299 if (unlikely(cs
->data
.conn_raw
.pass_on_close
!= 0))
300 kref_put(&(cs
->data
.conn_raw
.pass_on_close
->ref
),
303 cs
->data
.conn_raw
.pass_on_close
= passto
;
306 mutex_unlock(&(cs
->lock
));
/* On failure, release the lookup reference on passto. */
308 if (unlikely(rc
!= 0))
309 kref_put(&(passto
->ref
), cor_free_sock
);
/* cor_rawsocket_setsockopt() - proto_ops .setsockopt dispatcher.
 * Only SOL_COR options are handled; dispatches COR_PASS_ON_CLOSE, COR_TOS and
 * COR_PRIORITY to their handlers. The non-SOL_COR branch body and the final
 * unknown-option return are among the lines missing from this extraction.
 */
314 int cor_rawsocket_setsockopt(struct socket
*sock
, int level
,
315 int optname
, char __user
*optval
, unsigned int optlen
)
317 if (unlikely(level
!= SOL_COR
)) {
321 if (optname
== COR_PASS_ON_CLOSE
) {
322 return cor_rawsocket_setsockopt_passonclose(sock
, optval
,
324 } else if (optname
== COR_TOS
) {
325 return cor_socket_setsockopt_tos(sock
, optval
, optlen
);
326 } else if (optname
== COR_PRIORITY
) {
327 return cor_socket_setsockopt_priority(sock
, optval
, optlen
);
/* cor_rawsocket_getsockopt() - proto_ops .getsockopt; signature fragment only,
 * body lines missing from this extraction. */
333 int cor_rawsocket_getsockopt(struct socket
*sock
, int level
,
334 int optname
, char __user
*optval
, int __user
*optlen
)
/* _cor_rawsocket_poll() - compute the poll mask for a raw COR socket.
 * POLLIN|POLLRDNORM when a receive item or buffered conn data is available;
 * POLLOUT|POLLWRNORM when send buffer space is available.
 *
 * NOTE(review): extraction-garbled; the trailing parameter(s) after writelen,
 * the early-return values and the final "return mask" are missing lines.
 * The two BUG_ONs on cs->type (orig lines 350 and 352/353) look contradictory
 * as shown — presumably one is under a conditional lost to the extraction;
 * verify against the original file.
 */
339 static unsigned int _cor_rawsocket_poll(struct cor_sock
*cs
, __u32 writelen
,
342 unsigned int mask
= 0;
344 struct cor_conn
*trgt_sock
;
345 struct cor_conn
*src_sock
;
347 mutex_lock(&(cs
->lock
));
350 BUG_ON(cs
->type
!= CS_TYPE_CONN_RAW
);
352 BUG_ON(cs
->type
!= CS_TYPE_UNCONNECTED
&&
353 cs
->type
!= CS_TYPE_CONN_RAW
);
354 if (unlikely(cs
->type
!= CS_TYPE_CONN_RAW
)) {
355 mutex_unlock(&(cs
->lock
));
360 trgt_sock
= cs
->data
.conn_raw
.trgt_sock
;
361 src_sock
= cs
->data
.conn_raw
.src_sock
;
363 if (unlikely(trgt_sock
== 0 || src_sock
== 0)) {
364 mutex_unlock(&(cs
->lock
));
/* Readable if a pulled item is pending or the conn buffer has data. */
368 spin_lock_bh(&(trgt_sock
->rcv_lock
));
369 if (unlikely(trgt_sock
->isreset
!= 0 ||
370 cor_is_trgt_sock(trgt_sock
, cs
) == 0)) {
372 } else if (cs
->data
.conn_raw
.rcvitem
!= 0 ||
373 trgt_sock
->data_buf
.read_remaining
!= 0) {
374 mask
|= (POLLIN
| POLLRDNORM
);
376 spin_unlock_bh(&(trgt_sock
->rcv_lock
));
/* Writable if the source conn reports send buffer space. */
378 spin_lock_bh(&(src_sock
->rcv_lock
));
379 if (unlikely(src_sock
->isreset
!= 0 ||
380 cor_is_src_sock(src_sock
, cs
) == 0)) {
382 } else if (cor_sock_sndbufavailable(src_sock
, 1)) {
383 mask
|= (POLLOUT
| POLLWRNORM
);
385 spin_unlock_bh(&(src_sock
->rcv_lock
));
387 mutex_unlock(&(cs
->lock
));
/* ___cor_rawsocket_sendmsg() - innermost send step: push bufread bytes from
 * buf into the source conn under its rcv_lock. Caller holds cs_r_l->lock
 * ("_r_l" naming suggests raw+locked; TODO confirm convention).
 *
 * NOTE(review): extraction-garbled; error-return values and the final return
 * are among the missing lines.
 */
392 static int ___cor_rawsocket_sendmsg(char *buf
, __u32 bufread
,
393 __u32 buflen
, __u8 flush
, struct cor_sock
*cs_r_l
)
395 struct cor_conn
*src_sock
;
400 BUG_ON(cs_r_l
->type
!= CS_TYPE_CONN_RAW
);
402 src_sock
= cs_r_l
->data
.conn_raw
.src_sock
;
403 if (unlikely(src_sock
== 0)) {
407 spin_lock_bh(&(src_sock
->rcv_lock
));
409 if (unlikely(unlikely(cor_is_src_sock(src_sock
, cs_r_l
) == 0) ||
410 unlikely(src_sock
->isreset
!= 0))) {
411 spin_unlock_bh(&(src_sock
->rcv_lock
));
/* No send buffer space: clear ready_to_write so senders block until woken. */
415 if (cor_sock_sndbufavailable(src_sock
, 0) == 0) {
417 atomic_set(&(cs_r_l
->ready_to_write
), 0);
/* Sanity caps: 1 GiB, matching the max in cor_rawsocket_sendmsg(). */
422 BUG_ON(bufread
> (1024 * 1024 * 1024));
423 BUG_ON(buflen
> (1024 * 1024 * 1024));
425 rc2
= cor_receive_sock(src_sock
, buf
, bufread
, flush
);
427 BUG_ON(rc2
> (1024 * 1024 * 1024));
428 if (unlikely(rc2
== 0)) {
435 cor_flush_buf(src_sock
);
438 spin_unlock_bh(&(src_sock
->rcv_lock
));
/* __cor_rawsocket_sendmsg() - copy up to totallen bytes from the msghdr
 * iterator into a kmalloc'd bounce buffer, then hand it to
 * ___cor_rawsocket_sendmsg(). Caller holds cs_r_l->lock.
 *
 * NOTE(review): extraction-garbled; declarations of buf/bufread/st_rc/rc, the
 * clamping of len to buflen, kfree of the buffer and the returns are among
 * the missing lines.
 */
443 static int __cor_rawsocket_sendmsg(struct msghdr
*msg
, __u32 totallen
,
444 __u8 flush
, struct cor_sock
*cs_r_l
)
448 __u32 buflen
= cor_buf_optlen(totallen
, 1);
449 __u32 len
= totallen
;
453 BUG_ON(totallen
> (1024 * 1024 * 1024));
454 BUG_ON(buflen
> (1024 * 1024 * 1024));
461 if (unlikely(len
<= 0))
464 buf
= kmalloc(buflen
, GFP_KERNEL
);
465 if (unlikely(buf
== 0))
468 memset(buf
, 0, buflen
);
470 st_rc
= copy_from_iter(buf
+ bufread
, len
, &(msg
->msg_iter
));
/* Short copy from userspace iterator => fault path. */
472 if (unlikely(st_rc
!= len
)) {
477 rc
= ___cor_rawsocket_sendmsg(buf
, len
, buflen
, flush
, cs_r_l
);
/* _cor_rawsocket_sendmsg() - take cs->lock, validate the socket type and
 * forward one send attempt to __cor_rawsocket_sendmsg().
 * UNCONNECTED => error (missing line, presumably -ENOTCONN; TODO confirm).
 *
 * NOTE(review): extraction-garbled; the copied declaration, error returns and
 * final return are among the missing lines.
 */
484 static int _cor_rawsocket_sendmsg(struct msghdr
*msg
, __u32 totallen
,
485 struct cor_sock
*cs
, __u8 flush
)
489 BUG_ON(totallen
> (1024 * 1024 * 1024));
491 mutex_lock(&(cs
->lock
));
493 BUG_ON(cs
->type
!= CS_TYPE_UNCONNECTED
&& cs
->type
!= CS_TYPE_CONN_RAW
);
494 if (unlikely(cs
->type
== CS_TYPE_UNCONNECTED
)) {
495 mutex_unlock(&(cs
->lock
));
497 } else if (unlikely(cs
->type
!= CS_TYPE_CONN_RAW
)) {
498 mutex_unlock(&(cs
->lock
));
502 copied
= __cor_rawsocket_sendmsg(msg
, totallen
, flush
, cs
);
503 BUG_ON(copied
> 0 && ((__u32
) copied
) > totallen
);
505 mutex_unlock(&(cs
->lock
));
/* cor_rawsocket_sendmsg() - proto_ops .sendmsg entry point.
 * Loops calling _cor_rawsocket_sendmsg() until totallen bytes are accepted or
 * an error occurs; without MSG_DONTWAIT it blocks (with sk_sndtimeo timeout)
 * until ready_to_write is set. MSG_MORE suppresses the flush flag.
 *
 * NOTE(review): extraction-garbled; the total_len parameter, rc/copied
 * declarations, the copied accumulation, loop braces and final return are
 * among the missing lines.
 */
510 int cor_rawsocket_sendmsg(struct socket
*sock
, struct msghdr
*msg
,
513 __u8 flush
= ((msg
->msg_flags
& MSG_MORE
) == 0) ? 1 : 0;
514 int blocking
= (msg
->msg_flags
& MSG_DONTWAIT
) == 0;
519 struct cor_sock
*cs
= (struct cor_sock
*) sock
->sk
;
/* Hard cap of 1 GiB per call, checked both as __u32 and as size_t. */
521 __u32 max
= (1024 * 1024 * 1024);
524 totallen
= total_len
;
525 if (unlikely(totallen
> max
|| total_len
> max
)) {
530 while (rc
>= 0 && copied
< totallen
) {
531 rc
= _cor_rawsocket_sendmsg(msg
, totallen
, cs
, flush
);
533 BUG_ON(rc
> 0 && unlikely((rc
> total_len
|| rc
> totallen
)));
/* Blocking send with nothing yet copied: wait until writable or timeout. */
535 if (rc
== -EAGAIN
&& blocking
&& copied
== 0) {
538 waitret
= wait_event_interruptible_timeout(
539 *sk_sleep(&(cs
->sk
)),
540 atomic_read(&(cs
->ready_to_write
)) != 0,
543 if (unlikely(waitret
< 0))
544 rc
= sock_intr_errno(cs
->sk
.sk_sndtimeo
);
545 else if (unlikely(waitret
== 0))
551 if (rc
> 0 || copied
== 0)
553 if (unlikely(rc
== -EFAULT
))
556 BUG_ON(copied
> 0 && ((__u32
) copied
> totallen
));
/* __cor_rawsocket_recvmsg() - copy data from the currently pulled receive
 * item (cs->data.conn_raw.rcvitem) into the msghdr iterator, advancing
 * rcvoffset and freeing the item once fully consumed. Caller holds cs->lock.
 *
 * NOTE(review): extraction-garbled; the cs parameter, len/written/st_rc
 * declarations, the copy_to_iter iterator argument and the returns are among
 * the missing lines.
 */
562 static int __cor_rawsocket_recvmsg(struct msghdr
*msg
, __u32 totallen
,
565 struct cor_data_buf_item
*dbi
= cs
->data
.conn_raw
.rcvitem
;
571 BUG_ON(totallen
> (1024 * 1024 * 1024));
576 BUG_ON(dbi
->datalen
<= cs
->data
.conn_raw
.rcvoffset
);
/* Clamp len to the unread remainder of the current item. */
579 if (len
> (dbi
->datalen
- cs
->data
.conn_raw
.rcvoffset
))
580 len
= dbi
->datalen
- cs
->data
.conn_raw
.rcvoffset
;
582 if (unlikely(len
<= 0))
585 st_rc
= copy_to_iter(dbi
->buf
+ cs
->data
.conn_raw
.rcvoffset
, len
,
588 if (unlikely(st_rc
!= len
))
592 cs
->data
.conn_raw
.rcvoffset
+= len
;
/* Item fully consumed: free it and reset the cursor. */
593 if (dbi
->datalen
== cs
->data
.conn_raw
.rcvoffset
) {
594 cor_databuf_item_free(cs
->data
.conn_raw
.rcvitem
);
595 cs
->data
.conn_raw
.rcvitem
= 0;
596 cs
->data
.conn_raw
.rcvoffset
= 0;
599 BUG_ON(written
> totallen
);
/* _cor_rawsocket_recvmsg() - pull receive items from the target conn and
 * drain them into the msghdr until totallen bytes are copied or no more data.
 * Wakes the sender afterwards if anything was consumed.
 *
 * NOTE(review): extraction-garbled; rc/copied declarations, the copied
 * accumulation, error returns and braces are among the missing lines.
 */
604 static int _cor_rawsocket_recvmsg(struct msghdr
*msg
, __u32 totallen
,
605 struct cor_sock
*cs_r
)
610 struct cor_conn
*trgt_sock
;
612 mutex_lock(&(cs_r
->lock
));
614 BUG_ON(cs_r
->type
!= CS_TYPE_CONN_RAW
);
616 trgt_sock
= cs_r
->data
.conn_raw
.trgt_sock
;
618 if (unlikely(cs_r
->data
.conn_raw
.src_sock
== 0 || trgt_sock
== 0)) {
619 mutex_unlock(&(cs_r
->lock
));
/* Hold a stack reference so trgt_sock survives dropping the locks below. */
623 cor_conn_kref_get(trgt_sock
, "stack");
625 while (rc
>= 0 && copied
< totallen
) {
/* Skip the pull when an item from a previous iteration is still pending. */
626 if (cs_r
->data
.conn_raw
.rcvitem
!= 0)
629 spin_lock_bh(&(trgt_sock
->rcv_lock
));
/* NOTE(review): bitwise "|" between the two unlikely() conditions below is
 * almost certainly a typo for "||" — same truth value here since both
 * operands are 0/1, but both sides are always evaluated; compare the
 * identical guard in ___cor_rawsocket_sendmsg (orig line 409) which uses
 * "||". Fix in the original file. */
630 if (unlikely(unlikely(cor_is_trgt_sock(trgt_sock
, cs_r
) == 0) |
631 unlikely(trgt_sock
->isreset
!= 0))) {
632 spin_unlock_bh(&(trgt_sock
->rcv_lock
));
633 cor_conn_kref_put(trgt_sock
, "stack");
634 mutex_unlock(&(cs_r
->lock
));
638 cor_databuf_pull_dbi(cs_r
, trgt_sock
);
639 if (cs_r
->data
.conn_raw
.rcvitem
== 0)
640 atomic_set(&(cs_r
->ready_to_read
), 0);
642 cor_bufsize_read_to_sock(trgt_sock
);
644 spin_unlock_bh(&(trgt_sock
->rcv_lock
));
647 rc
= __cor_rawsocket_recvmsg(msg
, totallen
- copied
, cs_r
);
649 if (rc
> 0 || copied
== 0)
651 if (unlikely(rc
== -EFAULT
))
654 BUG_ON(copied
> 0 && ((__u32
) copied
> totallen
));
657 mutex_unlock(&(cs_r
->lock
));
/* Data was consumed: give the sender a chance to push more. */
659 if (likely(copied
> 0))
660 cor_wake_sender(trgt_sock
);
662 cor_conn_kref_put(trgt_sock
, "stack");
/* cor_rawsocket_recvmsg() - proto_ops .recvmsg entry point.
 * Validates length and flags (MSG_PEEK unsupported — error line missing from
 * this view), then calls _cor_rawsocket_recvmsg(); without MSG_DONTWAIT it
 * blocks on ready_to_read after -EAGAIN and presumably retries (retry line
 * missing — TODO confirm against original).
 *
 * NOTE(review): extraction-garbled; totallen/rc declarations, error returns
 * and the final return are among the missing lines.
 */
667 int cor_rawsocket_recvmsg(struct socket
*sock
, struct msghdr
*msg
,
668 size_t total_len
, int flags
)
670 struct cor_sock
*cs
= (struct cor_sock
*) sock
->sk
;
672 int blocking
= (flags
& MSG_DONTWAIT
) == 0;
675 __u32 max
= (1024 * 1024 * 1024);
678 totallen
= total_len
;
679 if (unlikely(totallen
> max
|| total_len
> max
))
682 if (unlikely((flags
& MSG_PEEK
) != 0))
685 mutex_lock(&(cs
->lock
));
686 BUG_ON(cs
->type
!= CS_TYPE_UNCONNECTED
&& cs
->type
!= CS_TYPE_CONN_RAW
);
687 if (unlikely(cs
->type
== CS_TYPE_UNCONNECTED
)) {
688 mutex_unlock(&(cs
->lock
));
690 } else if (unlikely(cs
->type
!= CS_TYPE_CONN_RAW
)) {
691 mutex_unlock(&(cs
->lock
));
694 mutex_unlock(&(cs
->lock
));
697 rc
= _cor_rawsocket_recvmsg(msg
, totallen
, cs
);
699 BUG_ON(rc
> 0 && unlikely((rc
> total_len
|| rc
> totallen
)));
/* Blocking read: sleep until data is signalled via ready_to_read. */
701 if (rc
== -EAGAIN
&& blocking
) {
702 if (wait_event_interruptible(*sk_sleep(&(cs
->sk
)),
703 atomic_read(&(cs
->ready_to_read
)) != 0) == 0)
/* cor_rawsocket_poll() - proto_ops .poll: register with the wait queue, then
 * delegate mask computation to _cor_rawsocket_poll() with an effectively
 * unlimited write length. (The "wait" parameter line is missing from this
 * extraction.) */
711 static unsigned int cor_rawsocket_poll(struct file
*file
, struct socket
*sock
,
714 struct cor_sock
*cs
= (struct cor_sock
*) sock
->sk
;
715 sock_poll_wait(file
, sock
, wait
);
716 return _cor_rawsocket_poll(cs
, U32_MAX
, 1);
720 const struct proto_ops cor_raw_proto_ops
= {
722 .owner
= THIS_MODULE
,
723 .release
= cor_rawsocket_release
,
724 .bind
= cor_rawsocket_bind
,
725 .connect
= cor_rawsocket_connect
,
726 .accept
= cor_rawsocket_accept
,
727 .listen
= cor_rawsocket_listen
,
728 .shutdown
= cor_rawsocket_shutdown
,
729 .ioctl
= cor_rawsocket_ioctl
,
730 .setsockopt
= cor_rawsocket_setsockopt
,
731 .getsockopt
= cor_rawsocket_getsockopt
,
733 .combat_ioctl
= cor_rawsocket_ioctl
,
734 .compat_setsockopt
= cor_rawsocket_setsockopt
,
735 .compat_getsockopt
= cor_rawsocket_getsockopt
,
737 .sendmsg
= cor_rawsocket_sendmsg
,
738 .recvmsg
= cor_rawsocket_recvmsg
,
739 .poll
= cor_rawsocket_poll
,
740 .socketpair
= cor_socket_socketpair
,
741 .getname
= cor_socket_getname
,
742 .mmap
= cor_socket_mmap
,
744 /* sendpage, splice_read, are optional */
/* cor_create_raw_sock() - create a raw COR socket: run the common socket
 * setup via _cor_createsock() (last arg 1 presumably marks "raw"; TODO
 * confirm) and install cor_raw_proto_ops. The kern parameter, rc check and
 * return are among the lines missing from this extraction. */
747 int cor_create_raw_sock(struct net
*net
, struct socket
*sock
, int protocol
,
750 int rc
= _cor_createsock(net
, sock
, protocol
, kern
, 1);
755 sock
->ops
= &cor_raw_proto_ops
;
760 MODULE_LICENSE("GPL");