convert rcv_conn_list to snd_conn_list
[cor.git] / net / cor / sock_raw.c
blob884f0d60c280239abc65c9c9d31ac4bad72425f1
1 /**
2 * Connection oriented routing
3 * Copyright (C) 2007-2021 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
21 #include <net/sock.h>
22 #include <linux/net.h>
23 #include <asm/uaccess.h>
25 #include "cor.h"
27 static int cor_rawsocket_release_trypasssocket(struct cor_sock *cs)
29 struct cor_sock *passto;
30 struct cor_conn *src_sock;
31 struct cor_conn *trgt_sock;
32 int rc = 0;
34 mutex_lock(&(cs->lock));
36 BUG_ON(cs->type != CS_TYPE_CONN_RAW);
37 passto = cs->data.conn_raw.pass_on_close;
38 cs->data.conn_raw.pass_on_close = 0;
40 src_sock = cs->data.conn_raw.src_sock;
41 trgt_sock = cs->data.conn_raw.trgt_sock;
43 mutex_unlock(&(cs->lock));
45 if (passto == 0)
46 return 0;
48 mutex_lock(&(passto->lock));
49 spin_lock_bh(&(src_sock->rcv_lock));
50 spin_lock_bh(&(trgt_sock->rcv_lock));
52 BUG_ON(src_sock->is_client == 0);
53 BUG_ON(passto->type != CS_TYPE_CONN_MANAGED);
55 if (unlikely(unlikely(passto->isreleased != 0) ||
56 unlikely(passto->data.conn_managed.connect_state !=
57 CS_CONNECTSTATE_CONNECTING)))
58 goto out;
60 BUG_ON(passto->data.conn_managed.src_sock != 0);
61 BUG_ON(passto->data.conn_managed.trgt_sock != 0);
63 if (unlikely(unlikely(src_sock->isreset != 0) ||
64 unlikely(trgt_sock->isreset != 0))) {
65 __cor_set_sock_connecterror(passto, -ENETUNREACH);
66 goto out;
69 BUG_ON(src_sock->sourcetype != SOURCE_SOCK);
70 BUG_ON(src_sock->source.sock.cs != cs);
71 BUG_ON(trgt_sock->targettype != TARGET_SOCK);
72 BUG_ON(trgt_sock->target.sock.cs != cs);
74 passto->data.conn_managed.src_sock = src_sock;
75 passto->data.conn_managed.trgt_sock = trgt_sock;
76 src_sock->source.sock.cs = passto;
77 trgt_sock->target.sock.cs = passto;
78 kref_get(&(passto->ref));
79 kref_get(&(passto->ref));
81 src_sock->source.sock.priority = cs->priority;
82 BUG_ON(passto->data.conn_managed.rcv_buf == 0);
83 trgt_sock->target.sock.socktype = SOCKTYPE_MANAGED;
84 trgt_sock->target.sock.rcv_buf_state = RCV_BUF_STATE_INCOMPLETE;
85 trgt_sock->target.sock.rcv_buf = passto->data.conn_managed.rcv_buf;
86 trgt_sock->target.sock.rcvd = 0;
88 cor_conn_refresh_priority(src_sock, 1);
90 rc = 1;
92 out:
93 spin_unlock_bh(&(trgt_sock->rcv_lock));
94 spin_unlock_bh(&(src_sock->rcv_lock));
95 mutex_unlock(&(passto->lock));
97 if (rc != 0) {
98 mutex_lock(&(cs->lock));
99 cs->data.conn_raw.src_sock = 0;
100 cs->data.conn_raw.trgt_sock = 0;
101 mutex_unlock(&(cs->lock));
103 lock_sock(&(cs->sk));
104 cs->sk.sk_socket->state = SS_CONNECTED;
105 release_sock(&(cs->sk));
107 mutex_lock(&(passto->lock));
108 BUG_ON(passto->type != CS_TYPE_CONN_MANAGED);
109 passto->data.conn_managed.connect_state =
110 CS_CONNECTSTATE_CONNECTED;
111 if (likely(passto->isreleased == 0)) {
112 atomic_set(&(passto->ready_to_write), 1);
113 barrier();
114 passto->sk.sk_state_change(&(passto->sk));
116 mutex_unlock(&(passto->lock));
119 /* pointers from struct cor_conn */
120 kref_put(&(cs->ref), cor_kreffree_bug);
121 kref_put(&(cs->ref), cor_kreffree_bug);
124 kref_put(&(passto->ref), cor_free_sock);
126 return rc;
129 int cor_rawsocket_release(struct socket *sock)
131 struct cor_sock *cs = (struct cor_sock *) sock->sk;
132 __u8 type;
134 mutex_lock(&(cs->lock));
135 cs->isreleased = 1;
136 type = cs->type;
137 mutex_unlock(&(cs->lock));
139 if (type == CS_TYPE_UNCONNECTED) {
140 } else if (type == CS_TYPE_CONN_RAW) {
141 mutex_lock(&(cs->lock));
142 BUG_ON(cs->type != CS_TYPE_CONN_RAW);
143 if (cs->data.conn_raw.rcvitem != 0) {
144 BUG_ON(cs->data.conn_raw.trgt_sock == 0);
146 cor_databuf_unpull_dpi(cs->data.conn_raw.trgt_sock, cs,
147 cs->data.conn_raw.rcvitem,
148 cs->data.conn_raw.rcvoffset);
149 cs->data.conn_raw.rcvitem = 0;
151 mutex_unlock(&(cs->lock));
153 if (cor_rawsocket_release_trypasssocket(cs) != 0)
154 goto out;
156 mutex_lock(&(cs->lock));
157 BUG_ON(cs->type != CS_TYPE_CONN_RAW);
158 if (cs->data.conn_raw.src_sock != 0 &&
159 cs->data.conn_raw.trgt_sock != 0) {
160 cor_reset_conn(cs->data.conn_raw.src_sock);
161 cor_conn_kref_put_bug(cs->data.conn_raw.src_sock,
162 "socket");
163 cor_conn_kref_put(cs->data.conn_raw.trgt_sock,
164 "socket");
165 cs->data.conn_raw.src_sock = 0;
166 cs->data.conn_raw.trgt_sock = 0;
168 mutex_unlock(&(cs->lock));
169 } else {
170 BUG();
173 out:
174 kref_put(&(cs->ref), cor_free_sock);
176 return 0;
/* Binding is not supported on raw cor sockets. */
int cor_rawsocket_bind(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len)
{
	return -EROFS;
}
185 int cor_rawsocket_connect(struct socket *sock, struct sockaddr *saddr,
186 int sockaddr_len, int flags)
188 struct cor_sock *cs = (struct cor_sock *) sock->sk;
190 struct cor_conn_bidir *cnb;
191 struct cor_conn *src_sock;
192 struct cor_conn *trgt_sock;
194 cnb = cor_alloc_conn(GFP_KERNEL, cs->is_highlatency);
196 if (unlikely(cnb == 0))
197 return -ETIMEDOUT;
199 src_sock = &(cnb->cli);
200 trgt_sock = &(cnb->srv);
202 mutex_lock(&(cs->lock));
203 spin_lock_bh(&(src_sock->rcv_lock));
204 spin_lock_bh(&(trgt_sock->rcv_lock));
205 if (cs->type != CS_TYPE_UNCONNECTED) {
206 spin_unlock_bh(&(trgt_sock->rcv_lock));
207 spin_unlock_bh(&(src_sock->rcv_lock));
208 mutex_unlock(&(cs->lock));
209 cor_reset_conn(src_sock);
210 cor_conn_kref_put(src_sock, "alloc_conn");
211 return -EISCONN;
214 cor_conn_init_sock_source(src_sock);
215 cor_conn_init_sock_target(trgt_sock);
217 memset(&(cs->data), 0, sizeof(cs->data));
218 cs->type = CS_TYPE_CONN_RAW;
219 cs->data.conn_raw.src_sock = src_sock;
220 cs->data.conn_raw.trgt_sock = trgt_sock;
221 cor_conn_kref_get(src_sock, "socket");
222 cor_conn_kref_get(trgt_sock, "socket");
224 src_sock->source.sock.cs = cs;
225 trgt_sock->target.sock.cs = cs;
226 kref_get(&(cs->ref));
227 kref_get(&(cs->ref));
229 trgt_sock->target.sock.socktype = SOCKTYPE_RAW;
230 src_sock->source.sock.priority = cs->priority;
232 cor_conn_refresh_priority(src_sock, 1);
234 spin_unlock_bh(&(trgt_sock->rcv_lock));
235 spin_unlock_bh(&(src_sock->rcv_lock));
236 mutex_unlock(&(cs->lock));
238 lock_sock(&(cs->sk));
239 sock->state = SS_CONNECTED;
240 release_sock(&(cs->sk));
242 cor_conn_kref_put(src_sock, "alloc_conn");
244 return 0;
/* Raw cor sockets cannot accept; they are connected via connect(). */
int cor_rawsocket_accept(struct socket *sock, struct socket *newsock, int flags,
		bool kern)
{
	return -EINVAL;
}
/* Listening is not supported on raw cor sockets. */
int cor_rawsocket_listen(struct socket *sock, int len)
{
	return -EOPNOTSUPP;
}
/* Shutdown is accepted but is a no-op for raw cor sockets. */
int cor_rawsocket_shutdown(struct socket *sock, int flags)
{
	return 0;
}
263 int cor_rawsocket_ioctl(struct socket *sock, unsigned int cmd,
264 unsigned long arg)
266 return -ENOIOCTLCMD;
269 static int cor_rawsocket_setsockopt_passonclose(struct socket *sock,
270 char __user *optval, unsigned int optlen)
272 struct cor_sock *cs = (struct cor_sock *) sock->sk;
274 int rc = 0;
276 __be64 cookie;
277 int notread;
278 struct cor_sock *passto;
280 if (unlikely(optlen != 8))
281 return -EINVAL;
283 notread = copy_from_user((char *) &cookie, optval, 8);
284 if (unlikely(notread != 0))
285 return -EFAULT;
287 passto = cor_get_sock_by_cookie(cookie);
288 if (unlikely(passto == 0))
289 return -EINVAL;
291 mutex_lock(&(cs->lock));
292 if (unlikely(cs->type != CS_TYPE_CONN_RAW)) {
293 rc = -EINVAL;
294 goto out;
297 BUG_ON(passto->type != CS_TYPE_CONN_MANAGED);
299 if (unlikely(cs->data.conn_raw.pass_on_close != 0))
300 kref_put(&(cs->data.conn_raw.pass_on_close->ref),
301 cor_free_sock);
303 cs->data.conn_raw.pass_on_close = passto;
305 out:
306 mutex_unlock(&(cs->lock));
308 if (unlikely(rc != 0))
309 kref_put(&(passto->ref), cor_free_sock);
311 return rc;
314 int cor_rawsocket_setsockopt(struct socket *sock, int level,
315 int optname, char __user *optval, unsigned int optlen)
317 if (unlikely(level != SOL_COR)) {
318 return -ENOPROTOOPT;
321 if (optname == COR_PASS_ON_CLOSE) {
322 return cor_rawsocket_setsockopt_passonclose(sock, optval,
323 optlen);
324 } else if (optname == COR_TOS) {
325 return cor_socket_setsockopt_tos(sock, optval, optlen);
326 } else if (optname == COR_PRIORITY) {
327 return cor_socket_setsockopt_priority(sock, optval, optlen);
328 } else {
329 return -ENOPROTOOPT;
333 int cor_rawsocket_getsockopt(struct socket *sock, int level,
334 int optname, char __user *optval, int __user *optlen)
336 return -ENOPROTOOPT;
339 static unsigned int _cor_rawsocket_poll(struct cor_sock *cs, __u32 writelen,
340 int frompoll)
342 unsigned int mask = 0;
344 struct cor_conn *trgt_sock;
345 struct cor_conn *src_sock;
347 mutex_lock(&(cs->lock));
349 if (frompoll == 0) {
350 BUG_ON(cs->type != CS_TYPE_CONN_RAW);
351 } else {
352 BUG_ON(cs->type != CS_TYPE_UNCONNECTED &&
353 cs->type != CS_TYPE_CONN_RAW);
354 if (unlikely(cs->type != CS_TYPE_CONN_RAW)) {
355 mutex_unlock(&(cs->lock));
356 return 0;
360 trgt_sock = cs->data.conn_raw.trgt_sock;
361 src_sock = cs->data.conn_raw.src_sock;
363 if (unlikely(trgt_sock == 0 || src_sock == 0)) {
364 mutex_unlock(&(cs->lock));
365 return U32_MAX;
368 spin_lock_bh(&(trgt_sock->rcv_lock));
369 if (unlikely(trgt_sock->isreset != 0 ||
370 cor_is_trgt_sock(trgt_sock, cs) == 0)) {
371 mask = U32_MAX;
372 } else if (cs->data.conn_raw.rcvitem != 0 ||
373 trgt_sock->data_buf.read_remaining != 0) {
374 mask |= (POLLIN | POLLRDNORM);
376 spin_unlock_bh(&(trgt_sock->rcv_lock));
378 spin_lock_bh(&(src_sock->rcv_lock));
379 if (unlikely(src_sock->isreset != 0 ||
380 cor_is_src_sock(src_sock, cs) == 0)) {
381 mask = U32_MAX;
382 } else if (cor_sock_sndbufavailable(src_sock, 1)) {
383 mask |= (POLLOUT | POLLWRNORM);
385 spin_unlock_bh(&(src_sock->rcv_lock));
387 mutex_unlock(&(cs->lock));
389 return mask;
392 static int ___cor_rawsocket_sendmsg(char *buf, __u32 bufread,
393 __u32 buflen, __u8 flush, struct cor_sock *cs_r_l)
395 struct cor_conn *src_sock;
397 int rc = 0;
398 __u32 rc2;
400 BUG_ON(cs_r_l->type != CS_TYPE_CONN_RAW);
402 src_sock = cs_r_l->data.conn_raw.src_sock;
403 if (unlikely(src_sock == 0)) {
404 return -ENOTCONN;
407 spin_lock_bh(&(src_sock->rcv_lock));
409 if (unlikely(unlikely(cor_is_src_sock(src_sock, cs_r_l) == 0) ||
410 unlikely(src_sock->isreset != 0))) {
411 spin_unlock_bh(&(src_sock->rcv_lock));
412 return -EPIPE;
415 if (cor_sock_sndbufavailable(src_sock, 0) == 0) {
416 rc = -EAGAIN;
417 atomic_set(&(cs_r_l->ready_to_write), 0);
418 src_sock->flush = 0;
419 goto out;
422 BUG_ON(bufread > (1024 * 1024 * 1024));
423 BUG_ON(buflen > (1024 * 1024 * 1024));
425 rc2 = cor_receive_sock(src_sock, buf, bufread, flush);
427 BUG_ON(rc2 > (1024 * 1024 * 1024));
428 if (unlikely(rc2 == 0)) {
429 rc = -ENOMEM;
430 } else {
431 rc = rc2;
434 if (likely(rc > 0))
435 cor_flush_buf(src_sock);
437 out:
438 spin_unlock_bh(&(src_sock->rcv_lock));
440 return rc;
443 static int __cor_rawsocket_sendmsg(struct msghdr *msg, __u32 totallen,
444 __u8 flush, struct cor_sock *cs_r_l)
446 char *buf = 0;
447 __u32 bufread = 0;
448 __u32 buflen = cor_buf_optlen(totallen, 1);
449 __u32 len = totallen;
450 size_t st_rc;
451 int rc;
453 BUG_ON(totallen > (1024 * 1024 * 1024));
454 BUG_ON(buflen > (1024 * 1024 * 1024));
456 if (buflen < len) {
457 len = buflen;
458 flush = 0;
461 if (unlikely(len <= 0))
462 return 0;
464 buf = kmalloc(buflen, GFP_KERNEL);
465 if (unlikely(buf == 0))
466 return -ENOMEM;
468 memset(buf, 0, buflen);
470 st_rc = copy_from_iter(buf + bufread, len, &(msg->msg_iter));
472 if (unlikely(st_rc != len)) {
473 kfree(buf);
474 return -EFAULT;
477 rc = ___cor_rawsocket_sendmsg(buf, len, buflen, flush, cs_r_l);
479 kfree(buf);
481 return rc;
484 static int _cor_rawsocket_sendmsg(struct msghdr *msg, __u32 totallen,
485 struct cor_sock *cs, __u8 flush)
487 int copied;
489 BUG_ON(totallen > (1024 * 1024 * 1024));
491 mutex_lock(&(cs->lock));
493 BUG_ON(cs->type != CS_TYPE_UNCONNECTED && cs->type != CS_TYPE_CONN_RAW);
494 if (unlikely(cs->type == CS_TYPE_UNCONNECTED)) {
495 mutex_unlock(&(cs->lock));
496 return -ENOTCONN;
497 } else if (unlikely(cs->type != CS_TYPE_CONN_RAW)) {
498 mutex_unlock(&(cs->lock));
499 return -EBADF;
502 copied = __cor_rawsocket_sendmsg(msg, totallen, flush, cs);
503 BUG_ON(copied > 0 && ((__u32) copied) > totallen);
505 mutex_unlock(&(cs->lock));
507 return copied;
510 int cor_rawsocket_sendmsg(struct socket *sock, struct msghdr *msg,
511 size_t total_len)
513 __u8 flush = ((msg->msg_flags & MSG_MORE) == 0) ? 1 : 0;
514 int blocking = (msg->msg_flags & MSG_DONTWAIT) == 0;
516 int rc = 0;
517 int copied = 0;
519 struct cor_sock *cs = (struct cor_sock *) sock->sk;
521 __u32 max = (1024 * 1024 * 1024);
522 __u32 totallen;
524 totallen = total_len;
525 if (unlikely(totallen > max || total_len > max)) {
526 totallen = max;
527 flush = 0;
530 while (rc >= 0 && copied < totallen) {
531 rc = _cor_rawsocket_sendmsg(msg, totallen, cs, flush);
533 BUG_ON(rc > 0 && unlikely((rc > total_len || rc > totallen)));
535 if (rc == -EAGAIN && blocking && copied == 0) {
536 long waitret;
538 waitret = wait_event_interruptible_timeout(
539 *sk_sleep(&(cs->sk)),
540 atomic_read(&(cs->ready_to_write)) != 0,
541 cs->sk.sk_sndtimeo);
543 if (unlikely(waitret < 0))
544 rc = sock_intr_errno(cs->sk.sk_sndtimeo);
545 else if (unlikely(waitret == 0))
546 rc = -ETIMEDOUT;
547 else
548 continue;
551 if (rc > 0 || copied == 0)
552 copied += rc;
553 if (unlikely(rc == -EFAULT))
554 copied = rc;
556 BUG_ON(copied > 0 && ((__u32) copied > totallen));
559 return rc;
562 static int __cor_rawsocket_recvmsg(struct msghdr *msg, __u32 totallen,
563 struct cor_sock *cs)
565 struct cor_data_buf_item *dbi = cs->data.conn_raw.rcvitem;
566 __u32 written = 0;
568 __u32 len;
569 size_t st_rc;
571 BUG_ON(totallen > (1024 * 1024 * 1024));
573 if (dbi == 0)
574 return -EAGAIN;
576 BUG_ON(dbi->datalen <= cs->data.conn_raw.rcvoffset);
578 len = totallen;
579 if (len > (dbi->datalen - cs->data.conn_raw.rcvoffset))
580 len = dbi->datalen - cs->data.conn_raw.rcvoffset;
582 if (unlikely(len <= 0))
583 return -EAGAIN;
585 st_rc = copy_to_iter(dbi->buf + cs->data.conn_raw.rcvoffset, len,
586 &(msg->msg_iter));
588 if (unlikely(st_rc != len))
589 return -EFAULT;
591 written += len;
592 cs->data.conn_raw.rcvoffset += len;
593 if (dbi->datalen == cs->data.conn_raw.rcvoffset) {
594 cor_databuf_item_free(cs->data.conn_raw.rcvitem);
595 cs->data.conn_raw.rcvitem = 0;
596 cs->data.conn_raw.rcvoffset = 0;
599 BUG_ON(written > totallen);
601 return written;
604 static int _cor_rawsocket_recvmsg(struct msghdr *msg, __u32 totallen,
605 struct cor_sock *cs_r)
607 int copied = 0;
608 int rc = 0;
610 struct cor_conn *trgt_sock;
612 mutex_lock(&(cs_r->lock));
614 BUG_ON(cs_r->type != CS_TYPE_CONN_RAW);
616 trgt_sock = cs_r->data.conn_raw.trgt_sock;
618 if (unlikely(cs_r->data.conn_raw.src_sock == 0 || trgt_sock == 0)) {
619 mutex_unlock(&(cs_r->lock));
620 return -ENOTCONN;
623 cor_conn_kref_get(trgt_sock, "stack");
625 while (rc >= 0 && copied < totallen) {
626 if (cs_r->data.conn_raw.rcvitem != 0)
627 goto recv;
629 spin_lock_bh(&(trgt_sock->rcv_lock));
630 if (unlikely(unlikely(cor_is_trgt_sock(trgt_sock, cs_r) == 0) |
631 unlikely(trgt_sock->isreset != 0))) {
632 spin_unlock_bh(&(trgt_sock->rcv_lock));
633 cor_conn_kref_put(trgt_sock, "stack");
634 mutex_unlock(&(cs_r->lock));
635 return -EPIPE;
638 cor_databuf_pull_dbi(cs_r, trgt_sock);
639 if (cs_r->data.conn_raw.rcvitem == 0)
640 atomic_set(&(cs_r->ready_to_read), 0);
642 cor_bufsize_read_to_sock(trgt_sock);
644 spin_unlock_bh(&(trgt_sock->rcv_lock));
646 recv:
647 rc = __cor_rawsocket_recvmsg(msg, totallen - copied, cs_r);
649 if (rc > 0 || copied == 0)
650 copied += rc;
651 if (unlikely(rc == -EFAULT))
652 copied = rc;
654 BUG_ON(copied > 0 && ((__u32) copied > totallen));
657 mutex_unlock(&(cs_r->lock));
659 if (likely(copied > 0))
660 cor_wake_sender(trgt_sock);
662 cor_conn_kref_put(trgt_sock, "stack");
664 return copied;
667 int cor_rawsocket_recvmsg(struct socket *sock, struct msghdr *msg,
668 size_t total_len, int flags)
670 struct cor_sock *cs = (struct cor_sock *) sock->sk;
672 int blocking = (flags & MSG_DONTWAIT) == 0;
674 int rc = 0;
675 __u32 max = (1024 * 1024 * 1024);
676 __u32 totallen;
678 totallen = total_len;
679 if (unlikely(totallen > max || total_len > max))
680 totallen = max;
682 if (unlikely((flags & MSG_PEEK) != 0))
683 return -EINVAL;
685 mutex_lock(&(cs->lock));
686 BUG_ON(cs->type != CS_TYPE_UNCONNECTED && cs->type != CS_TYPE_CONN_RAW);
687 if (unlikely(cs->type == CS_TYPE_UNCONNECTED)) {
688 mutex_unlock(&(cs->lock));
689 return -ENOTCONN;
690 } else if (unlikely(cs->type != CS_TYPE_CONN_RAW)) {
691 mutex_unlock(&(cs->lock));
692 return -EBADF;
694 mutex_unlock(&(cs->lock));
696 recv:
697 rc = _cor_rawsocket_recvmsg(msg, totallen, cs);
699 BUG_ON(rc > 0 && unlikely((rc > total_len || rc > totallen)));
701 if (rc == -EAGAIN && blocking) {
702 if (wait_event_interruptible(*sk_sleep(&(cs->sk)),
703 atomic_read(&(cs->ready_to_read)) != 0) == 0)
704 goto recv;
705 rc = -ERESTARTSYS;
708 return rc;
711 static unsigned int cor_rawsocket_poll(struct file *file, struct socket *sock,
712 poll_table *wait)
714 struct cor_sock *cs = (struct cor_sock *) sock->sk;
715 sock_poll_wait(file, sock, wait);
716 return _cor_rawsocket_poll(cs, U32_MAX, 1);
720 const struct proto_ops cor_raw_proto_ops = {
721 .family = PF_COR,
722 .owner = THIS_MODULE,
723 .release = cor_rawsocket_release,
724 .bind = cor_rawsocket_bind,
725 .connect = cor_rawsocket_connect,
726 .accept = cor_rawsocket_accept,
727 .listen = cor_rawsocket_listen,
728 .shutdown = cor_rawsocket_shutdown,
729 .ioctl = cor_rawsocket_ioctl,
730 .setsockopt = cor_rawsocket_setsockopt,
731 .getsockopt = cor_rawsocket_getsockopt,
732 #ifdef CONFIG_COMPAT
733 .combat_ioctl = cor_rawsocket_ioctl,
734 .compat_setsockopt = cor_rawsocket_setsockopt,
735 .compat_getsockopt = cor_rawsocket_getsockopt,
736 #endif
737 .sendmsg = cor_rawsocket_sendmsg,
738 .recvmsg = cor_rawsocket_recvmsg,
739 .poll = cor_rawsocket_poll,
740 .socketpair = cor_socket_socketpair,
741 .getname = cor_socket_getname,
742 .mmap = cor_socket_mmap,
744 /* sendpage, splice_read, are optional */
747 int cor_create_raw_sock(struct net *net, struct socket *sock, int protocol,
748 int kern)
750 int rc = _cor_createsock(net, sock, protocol, kern, 1);
752 if (rc != 0)
753 return rc;
755 sock->ops = &cor_raw_proto_ops;
757 return 0;
760 MODULE_LICENSE("GPL");