/* net/cor/sock_raw.c -- raw socket interface of the cor module */
/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <net/sock.h>

#include <linux/net.h>
#include <linux/uaccess.h>

#include "cor.h"
22 static int cor_rawsocket_release_trypasssocket(struct cor_sock *cs)
24 struct cor_sock *passto;
25 struct cor_conn *src_sock;
26 struct cor_conn *trgt_sock;
27 int rc = 0;
29 mutex_lock(&cs->lock);
31 BUG_ON(cs->type != CS_TYPE_CONN_RAW);
32 passto = cs->data.conn_raw.pass_on_close;
33 cs->data.conn_raw.pass_on_close = 0;
35 src_sock = cs->data.conn_raw.src_sock;
36 trgt_sock = cs->data.conn_raw.trgt_sock;
38 mutex_unlock(&cs->lock);
40 if (passto == 0)
41 return 0;
43 mutex_lock(&passto->lock);
44 spin_lock_bh(&src_sock->rcv_lock);
45 spin_lock_bh(&trgt_sock->rcv_lock);
47 BUG_ON(src_sock->is_client == 0);
48 BUG_ON(passto->type != CS_TYPE_CONN_MANAGED);
50 if (unlikely(unlikely(passto->isreleased != 0) ||
51 unlikely(passto->data.conn_managed.connect_state !=
52 CS_CONNECTSTATE_CONNECTING)))
53 goto out;
55 BUG_ON(passto->data.conn_managed.src_sock != 0);
56 BUG_ON(passto->data.conn_managed.trgt_sock != 0);
58 if (unlikely(unlikely(src_sock->isreset != 0) ||
59 unlikely(trgt_sock->isreset != 0))) {
60 __cor_set_sock_connecterror(passto, ENETUNREACH);
61 goto out;
64 BUG_ON(src_sock->sourcetype != SOURCE_SOCK);
65 BUG_ON(src_sock->src.sock.ed->cs != cs);
66 BUG_ON(trgt_sock->targettype != TARGET_SOCK);
67 BUG_ON(trgt_sock->trgt.sock.cs != cs);
69 passto->data.conn_managed.src_sock = src_sock;
70 passto->data.conn_managed.trgt_sock = trgt_sock;
71 src_sock->src.sock.ed->cs = passto;
72 trgt_sock->trgt.sock.cs = passto;
73 kref_get(&passto->ref);
74 kref_get(&passto->ref);
76 src_sock->src.sock.ed->priority = cs->priority;
78 cor_set_conn_is_highlatency(src_sock, passto->is_highlatency, 1, 1);
80 BUG_ON(passto->data.conn_managed.rcv_buf == 0);
81 src_sock->src.sock.socktype = SOCKTYPE_MANAGED;
82 trgt_sock->trgt.sock.socktype = SOCKTYPE_MANAGED;
83 trgt_sock->trgt.sock.rcv_buf_state = RCV_BUF_STATE_INCOMPLETE;
84 trgt_sock->trgt.sock.rcv_buf = passto->data.conn_managed.rcv_buf;
85 trgt_sock->trgt.sock.rcvd = 0;
87 BUG_ON(src_sock->src.sock.keepalive_intransit != 0);
88 src_sock->src.sock.ed->jiffies_keepalive_lastact = jiffies -
89 KEEPALIVE_INTERVAL_SECS * HZ + HZ;
90 cor_keepalive_req_sched_timer(src_sock);
92 cor_conn_refresh_priority(src_sock, 1);
94 rc = 1;
96 out:
97 spin_unlock_bh(&trgt_sock->rcv_lock);
98 spin_unlock_bh(&src_sock->rcv_lock);
99 mutex_unlock(&passto->lock);
101 if (rc != 0) {
102 mutex_lock(&cs->lock);
103 cs->data.conn_raw.src_sock = 0;
104 cs->data.conn_raw.trgt_sock = 0;
105 mutex_unlock(&cs->lock);
107 lock_sock(&cs->sk);
108 cs->sk.sk_socket->state = SS_CONNECTED;
109 release_sock(&cs->sk);
111 mutex_lock(&passto->lock);
112 BUG_ON(passto->type != CS_TYPE_CONN_MANAGED);
113 passto->data.conn_managed.connect_state =
114 CS_CONNECTSTATE_CONNECTED;
115 if (likely(passto->isreleased == 0)) {
116 atomic_set(&passto->ready_to_write, 1);
117 barrier();
118 passto->sk.sk_state_change(&passto->sk);
120 mutex_unlock(&passto->lock);
123 /* pointers from struct cor_conn */
124 kref_put(&cs->ref, cor_kreffree_bug);
125 kref_put(&cs->ref, cor_kreffree_bug);
128 kref_put(&passto->ref, cor_free_sock);
130 return rc;
133 int cor_rawsocket_release(struct socket *sock)
135 struct cor_sock *cs = (struct cor_sock *) sock->sk;
136 __u8 type;
138 mutex_lock(&cs->lock);
139 cs->isreleased = 1;
140 type = cs->type;
141 mutex_unlock(&cs->lock);
143 if (type == CS_TYPE_UNCONNECTED) {
144 } else if (type == CS_TYPE_CONN_RAW) {
145 mutex_lock(&cs->lock);
146 BUG_ON(cs->type != CS_TYPE_CONN_RAW);
147 if (cs->data.conn_raw.rcvitem != 0) {
148 BUG_ON(cs->data.conn_raw.trgt_sock == 0);
150 cor_databuf_unpull_dpi(cs->data.conn_raw.trgt_sock, cs,
151 cs->data.conn_raw.rcvitem,
152 cs->data.conn_raw.rcvoffset);
153 cs->data.conn_raw.rcvitem = 0;
155 mutex_unlock(&cs->lock);
157 if (cor_rawsocket_release_trypasssocket(cs) != 0)
158 goto out;
160 mutex_lock(&cs->lock);
161 BUG_ON(cs->type != CS_TYPE_CONN_RAW);
162 if (cs->data.conn_raw.src_sock != 0 &&
163 cs->data.conn_raw.trgt_sock != 0) {
164 cor_reset_conn(cs->data.conn_raw.src_sock);
165 cor_conn_kref_put_bug(cs->data.conn_raw.src_sock,
166 "socket");
167 cor_conn_kref_put(cs->data.conn_raw.trgt_sock,
168 "socket");
169 cs->data.conn_raw.src_sock = 0;
170 cs->data.conn_raw.trgt_sock = 0;
172 mutex_unlock(&cs->lock);
173 } else {
174 BUG();
177 out:
178 kref_put(&cs->ref, cor_free_sock);
180 return 0;
183 int cor_rawsocket_bind(struct socket *sock, struct sockaddr *saddr,
184 int sockaddr_len)
186 return -EROFS;
189 int cor_rawsocket_connect(struct socket *sock, struct sockaddr *saddr,
190 int sockaddr_len, int flags)
192 struct cor_sock *cs = (struct cor_sock *) sock->sk;
194 struct cor_conn_bidir *cnb;
195 struct cor_conn *src_sock;
196 struct cor_conn *trgt_sock;
198 mutex_lock(&cs->lock);
199 if (cs->type != CS_TYPE_UNCONNECTED) {
200 mutex_unlock(&cs->lock);
201 return -EISCONN;
204 cnb = cor_alloc_conn(GFP_KERNEL, cs->is_highlatency);
206 if (unlikely(cnb == 0)) {
207 mutex_unlock(&cs->lock);
208 return -ETIMEDOUT;
211 src_sock = &cnb->cli;
212 trgt_sock = &cnb->srv;
214 spin_lock_bh(&src_sock->rcv_lock);
215 spin_lock_bh(&trgt_sock->rcv_lock);
217 if (cor_conn_init_sock_source(src_sock) != 0) {
218 spin_unlock_bh(&trgt_sock->rcv_lock);
219 spin_unlock_bh(&src_sock->rcv_lock);
220 mutex_unlock(&cs->lock);
221 cor_reset_conn(src_sock);
222 cor_conn_kref_put(src_sock, "alloc_conn");
223 return -EISCONN;
225 cor_conn_init_sock_target(trgt_sock);
227 memset(&cs->data, 0, sizeof(cs->data));
228 cs->type = CS_TYPE_CONN_RAW;
229 cs->data.conn_raw.src_sock = src_sock;
230 cs->data.conn_raw.trgt_sock = trgt_sock;
231 cor_conn_kref_get(src_sock, "socket");
232 cor_conn_kref_get(trgt_sock, "socket");
234 src_sock->src.sock.ed->cs = cs;
235 trgt_sock->trgt.sock.cs = cs;
236 kref_get(&cs->ref);
237 kref_get(&cs->ref);
239 src_sock->src.sock.socktype = SOCKTYPE_RAW;
240 trgt_sock->trgt.sock.socktype = SOCKTYPE_RAW;
241 src_sock->src.sock.ed->priority = cs->priority;
243 cor_conn_refresh_priority(src_sock, 1);
245 spin_unlock_bh(&trgt_sock->rcv_lock);
246 spin_unlock_bh(&src_sock->rcv_lock);
247 mutex_unlock(&cs->lock);
249 lock_sock(&cs->sk);
250 sock->state = SS_CONNECTED;
251 release_sock(&cs->sk);
253 cor_conn_kref_put(src_sock, "alloc_conn");
255 return 0;
258 int cor_rawsocket_accept(struct socket *sock, struct socket *newsock, int flags,
259 bool kern)
261 return -EINVAL;
264 int cor_rawsocket_listen(struct socket *sock, int len)
266 return -EOPNOTSUPP;
/* Shutdown is accepted but has no effect; teardown happens on release. */
int cor_rawsocket_shutdown(struct socket *sock, int flags)
{
	return 0;
}
274 int cor_rawsocket_ioctl(struct socket *sock, unsigned int cmd,
275 unsigned long arg)
277 return -ENOIOCTLCMD;
280 static int cor_rawsocket_setsockopt_passonclose(struct socket *sock,
281 char __user *optval, unsigned int optlen)
283 struct cor_sock *cs = (struct cor_sock *) sock->sk;
285 int rc = 0;
287 __be64 cookie;
288 int notread;
289 struct cor_sock *passto;
291 if (unlikely(optlen != 8))
292 return -EINVAL;
294 notread = copy_from_user((char *) &cookie, optval, 8);
295 if (unlikely(notread != 0))
296 return -EFAULT;
298 passto = cor_get_sock_by_cookie(cookie);
299 if (unlikely(passto == 0))
300 return -EINVAL;
302 mutex_lock(&cs->lock);
303 if (unlikely(cs->type != CS_TYPE_CONN_RAW)) {
304 rc = -EINVAL;
305 goto out;
308 BUG_ON(passto->type != CS_TYPE_CONN_MANAGED);
310 if (unlikely(cs->data.conn_raw.pass_on_close != 0))
311 kref_put(&cs->data.conn_raw.pass_on_close->ref, cor_free_sock);
313 cs->data.conn_raw.pass_on_close = passto;
315 out:
316 mutex_unlock(&cs->lock);
318 if (unlikely(rc != 0))
319 kref_put(&passto->ref, cor_free_sock);
321 return rc;
324 int cor_rawsocket_setsockopt(struct socket *sock, int level,
325 int optname, char __user *optval, unsigned int optlen)
327 if (unlikely(level != SOL_COR))
328 return -ENOPROTOOPT;
330 if (optname == COR_PASS_ON_CLOSE) {
331 return cor_rawsocket_setsockopt_passonclose(sock, optval,
332 optlen);
333 } else if (optname == COR_TOS) {
334 return cor_socket_setsockopt_tos(sock, optval, optlen);
335 } else if (optname == COR_PRIORITY) {
336 return cor_socket_setsockopt_priority(sock, optval, optlen);
337 } else {
338 return -ENOPROTOOPT;
342 int cor_rawsocket_getsockopt(struct socket *sock, int level,
343 int optname, char __user *optval, int __user *optlen)
345 return -ENOPROTOOPT;
348 static unsigned int _cor_rawsocket_poll(struct cor_sock *cs, __u32 writelen,
349 int frompoll)
351 unsigned int mask = 0;
353 struct cor_conn *trgt_sock;
354 struct cor_conn *src_sock;
356 mutex_lock(&cs->lock);
358 if (frompoll == 0) {
359 BUG_ON(cs->type != CS_TYPE_CONN_RAW);
360 } else {
361 BUG_ON(cs->type != CS_TYPE_UNCONNECTED &&
362 cs->type != CS_TYPE_CONN_RAW);
363 if (unlikely(cs->type != CS_TYPE_CONN_RAW)) {
364 mutex_unlock(&cs->lock);
365 return 0;
369 trgt_sock = cs->data.conn_raw.trgt_sock;
370 src_sock = cs->data.conn_raw.src_sock;
372 if (unlikely(trgt_sock == 0 || src_sock == 0)) {
373 mutex_unlock(&cs->lock);
374 return U32_MAX;
377 spin_lock_bh(&trgt_sock->rcv_lock);
378 if (unlikely(trgt_sock->isreset != 0 ||
379 cor_is_trgt_sock(trgt_sock, cs) == 0)) {
380 mask = U32_MAX;
381 } else if (cs->data.conn_raw.rcvitem != 0 ||
382 trgt_sock->data_buf.read_remaining != 0) {
383 mask |= (POLLIN | POLLRDNORM);
385 spin_unlock_bh(&trgt_sock->rcv_lock);
387 spin_lock_bh(&src_sock->rcv_lock);
388 if (unlikely(src_sock->isreset != 0 ||
389 cor_is_src_sock(src_sock, cs) == 0)) {
390 mask = U32_MAX;
391 } else if (cor_sock_sndbufavailable(src_sock, 1)) {
392 mask |= (POLLOUT | POLLWRNORM);
394 spin_unlock_bh(&src_sock->rcv_lock);
396 mutex_unlock(&cs->lock);
398 return mask;
401 static int ___cor_rawsocket_sendmsg(char *buf, __u32 bufread,
402 __u32 buflen, __u8 flush, struct cor_sock *cs_r_l)
404 struct cor_conn *src_sock;
406 int rc = 0;
407 __u32 rc2;
409 BUG_ON(cs_r_l->type != CS_TYPE_CONN_RAW);
411 src_sock = cs_r_l->data.conn_raw.src_sock;
412 if (unlikely(src_sock == 0))
413 return -ENOTCONN;
415 spin_lock_bh(&src_sock->rcv_lock);
417 if (unlikely(unlikely(cor_is_src_sock(src_sock, cs_r_l) == 0) ||
418 unlikely(src_sock->isreset != 0))) {
419 spin_unlock_bh(&src_sock->rcv_lock);
420 return -EPIPE;
423 if (cor_sock_sndbufavailable(src_sock, 0) == 0) {
424 rc = -EAGAIN;
425 atomic_set(&cs_r_l->ready_to_write, 0);
426 src_sock->flush = 0;
427 goto out;
430 BUG_ON(bufread > (1024 * 1024 * 1024));
431 BUG_ON(buflen > (1024 * 1024 * 1024));
433 rc2 = cor_receive_sock(src_sock, buf, bufread, flush);
435 BUG_ON(rc2 > (1024 * 1024 * 1024));
436 if (unlikely(rc2 == 0))
437 rc = -ENOMEM;
438 else
439 rc = rc2;
441 if (likely(rc > 0))
442 cor_flush_buf(src_sock);
444 out:
445 spin_unlock_bh(&src_sock->rcv_lock);
447 return rc;
450 static int __cor_rawsocket_sendmsg(struct msghdr *msg, __u32 totallen,
451 __u8 flush, struct cor_sock *cs_r_l)
453 char *buf = 0;
454 __u32 bufread = 0;
455 __u32 buflen = cor_buf_optlen(totallen, 1);
456 __u32 len = totallen;
457 size_t st_rc;
458 int rc;
460 BUG_ON(totallen > (1024 * 1024 * 1024));
461 BUG_ON(buflen > (1024 * 1024 * 1024));
463 if (buflen < len) {
464 len = buflen;
465 flush = 0;
468 if (unlikely(len <= 0))
469 return 0;
471 buf = kmalloc(buflen, GFP_KERNEL);
472 if (unlikely(buf == 0))
473 return -ENOMEM;
475 memset(buf, 0, buflen);
477 st_rc = copy_from_iter(buf + bufread, len, &msg->msg_iter);
479 if (unlikely(st_rc != len)) {
480 kfree(buf);
481 return -EFAULT;
484 rc = ___cor_rawsocket_sendmsg(buf, len, buflen, flush, cs_r_l);
486 kfree(buf);
488 return rc;
491 static int _cor_rawsocket_sendmsg(struct msghdr *msg, __u32 totallen,
492 struct cor_sock *cs, __u8 flush)
494 int copied;
496 BUG_ON(totallen > (1024 * 1024 * 1024));
498 mutex_lock(&cs->lock);
500 BUG_ON(cs->type != CS_TYPE_UNCONNECTED && cs->type != CS_TYPE_CONN_RAW);
501 if (unlikely(cs->type == CS_TYPE_UNCONNECTED)) {
502 mutex_unlock(&cs->lock);
503 return -ENOTCONN;
504 } else if (unlikely(cs->type != CS_TYPE_CONN_RAW)) {
505 mutex_unlock(&cs->lock);
506 return -EBADF;
509 copied = __cor_rawsocket_sendmsg(msg, totallen, flush, cs);
510 BUG_ON(copied > 0 && ((__u32) copied) > totallen);
512 mutex_unlock(&cs->lock);
514 return copied;
517 int cor_rawsocket_sendmsg(struct socket *sock, struct msghdr *msg,
518 size_t total_len)
520 __u8 flush = ((msg->msg_flags & MSG_MORE) == 0) ? 1 : 0;
521 int blocking = (msg->msg_flags & MSG_DONTWAIT) == 0;
523 int rc = 0;
524 int copied = 0;
526 struct cor_sock *cs = (struct cor_sock *) sock->sk;
528 __u32 max = (1024 * 1024 * 1024);
529 __u32 totallen;
531 totallen = total_len;
532 if (unlikely(totallen > max || total_len > max)) {
533 totallen = max;
534 flush = 0;
537 while (rc >= 0 && copied < totallen) {
538 rc = _cor_rawsocket_sendmsg(msg, totallen, cs, flush);
540 BUG_ON(rc > 0 && unlikely((rc > total_len || rc > totallen)));
542 if (rc == -EAGAIN && blocking && copied == 0) {
543 long waitret;
545 waitret = wait_event_interruptible_timeout(
546 *sk_sleep(&cs->sk),
547 atomic_read(&cs->ready_to_write) != 0,
548 cs->sk.sk_sndtimeo);
550 if (unlikely(waitret < 0))
551 rc = sock_intr_errno(cs->sk.sk_sndtimeo);
552 else if (unlikely(waitret == 0))
553 rc = -ETIMEDOUT;
554 else
555 continue;
558 if (rc > 0 || copied == 0)
559 copied += rc;
560 if (unlikely(rc == -EFAULT))
561 copied = rc;
563 BUG_ON(copied > 0 && ((__u32) copied > totallen));
566 return rc;
569 static int __cor_rawsocket_recvmsg(struct msghdr *msg, __u32 totallen,
570 struct cor_sock *cs)
572 struct cor_data_buf_item *dbi = cs->data.conn_raw.rcvitem;
573 __u32 written = 0;
575 __u32 len;
576 size_t st_rc;
578 BUG_ON(totallen > (1024 * 1024 * 1024));
580 if (dbi == 0)
581 return -EAGAIN;
583 BUG_ON(dbi->datalen <= cs->data.conn_raw.rcvoffset);
585 len = totallen;
586 if (len > (dbi->datalen - cs->data.conn_raw.rcvoffset))
587 len = dbi->datalen - cs->data.conn_raw.rcvoffset;
589 if (unlikely(len <= 0))
590 return -EAGAIN;
592 st_rc = copy_to_iter(dbi->buf + cs->data.conn_raw.rcvoffset, len,
593 &msg->msg_iter);
595 if (unlikely(st_rc != len))
596 return -EFAULT;
598 written += len;
599 cs->data.conn_raw.rcvoffset += len;
600 if (dbi->datalen == cs->data.conn_raw.rcvoffset) {
601 cor_databuf_item_free(cs->data.conn_raw.rcvitem);
602 cs->data.conn_raw.rcvitem = 0;
603 cs->data.conn_raw.rcvoffset = 0;
606 BUG_ON(written > totallen);
608 return written;
611 static int _cor_rawsocket_recvmsg(struct msghdr *msg, __u32 totallen,
612 struct cor_sock *cs_r)
614 int copied = 0;
615 int rc = 0;
617 struct cor_conn *trgt_sock;
619 mutex_lock(&cs_r->lock);
621 BUG_ON(cs_r->type != CS_TYPE_CONN_RAW);
623 trgt_sock = cs_r->data.conn_raw.trgt_sock;
625 if (unlikely(cs_r->data.conn_raw.src_sock == 0 || trgt_sock == 0)) {
626 mutex_unlock(&cs_r->lock);
627 return -ENOTCONN;
630 cor_conn_kref_get(trgt_sock, "stack");
632 while (rc >= 0 && copied < totallen) {
633 if (cs_r->data.conn_raw.rcvitem != 0)
634 goto recv;
636 spin_lock_bh(&trgt_sock->rcv_lock);
637 if (unlikely(unlikely(cor_is_trgt_sock(trgt_sock, cs_r) == 0) |
638 unlikely(trgt_sock->isreset != 0))) {
639 spin_unlock_bh(&trgt_sock->rcv_lock);
640 cor_conn_kref_put(trgt_sock, "stack");
641 mutex_unlock(&cs_r->lock);
642 return -EPIPE;
645 cor_databuf_pull_dbi(cs_r, trgt_sock);
646 if (cs_r->data.conn_raw.rcvitem == 0)
647 atomic_set(&cs_r->ready_to_read, 0);
649 cor_bufsize_read_to_sock(trgt_sock);
651 spin_unlock_bh(&trgt_sock->rcv_lock);
653 recv:
654 rc = __cor_rawsocket_recvmsg(msg, totallen - copied, cs_r);
656 if (rc > 0 || copied == 0)
657 copied += rc;
658 if (unlikely(rc == -EFAULT))
659 copied = rc;
661 BUG_ON(copied > 0 && ((__u32) copied > totallen));
664 mutex_unlock(&cs_r->lock);
666 if (likely(copied > 0))
667 cor_wake_sender(trgt_sock);
669 cor_conn_kref_put(trgt_sock, "stack");
671 return copied;
674 int cor_rawsocket_recvmsg(struct socket *sock, struct msghdr *msg,
675 size_t total_len, int flags)
677 struct cor_sock *cs = (struct cor_sock *) sock->sk;
679 int blocking = (flags & MSG_DONTWAIT) == 0;
681 int rc = 0;
682 __u32 max = (1024 * 1024 * 1024);
683 __u32 totallen;
685 totallen = total_len;
686 if (unlikely(totallen > max || total_len > max))
687 totallen = max;
689 if (unlikely((flags & MSG_PEEK) != 0))
690 return -EINVAL;
692 mutex_lock(&cs->lock);
693 BUG_ON(cs->type != CS_TYPE_UNCONNECTED && cs->type != CS_TYPE_CONN_RAW);
694 if (unlikely(cs->type == CS_TYPE_UNCONNECTED)) {
695 mutex_unlock(&cs->lock);
696 return -ENOTCONN;
697 } else if (unlikely(cs->type != CS_TYPE_CONN_RAW)) {
698 mutex_unlock(&cs->lock);
699 return -EBADF;
701 mutex_unlock(&cs->lock);
703 recv:
704 rc = _cor_rawsocket_recvmsg(msg, totallen, cs);
706 BUG_ON(rc > 0 && unlikely((rc > total_len || rc > totallen)));
708 if (rc == -EAGAIN && blocking) {
709 if (wait_event_interruptible(*sk_sleep(&cs->sk),
710 atomic_read(&cs->ready_to_read) != 0) == 0)
711 goto recv;
712 rc = -ERESTARTSYS;
715 return rc;
718 static unsigned int cor_rawsocket_poll(struct file *file, struct socket *sock,
719 poll_table *wait)
721 struct cor_sock *cs = (struct cor_sock *) sock->sk;
723 sock_poll_wait(file, sock, wait);
724 return _cor_rawsocket_poll(cs, U32_MAX, 1);
728 const struct proto_ops cor_raw_proto_ops = {
729 .family = PF_COR,
730 .owner = THIS_MODULE,
731 .release = cor_rawsocket_release,
732 .bind = cor_rawsocket_bind,
733 .connect = cor_rawsocket_connect,
734 .accept = cor_rawsocket_accept,
735 .listen = cor_rawsocket_listen,
736 .shutdown = cor_rawsocket_shutdown,
737 .ioctl = cor_rawsocket_ioctl,
738 .setsockopt = cor_rawsocket_setsockopt,
739 .getsockopt = cor_rawsocket_getsockopt,
740 #ifdef CONFIG_COMPAT
741 .combat_ioctl = cor_rawsocket_ioctl,
742 .compat_setsockopt = cor_rawsocket_setsockopt,
743 .compat_getsockopt = cor_rawsocket_getsockopt,
744 #endif
745 .sendmsg = cor_rawsocket_sendmsg,
746 .recvmsg = cor_rawsocket_recvmsg,
747 .poll = cor_rawsocket_poll,
748 .socketpair = cor_socket_socketpair,
749 .getname = cor_socket_getname,
750 .mmap = cor_socket_mmap,
752 /* sendpage, splice_read, are optional */
755 int cor_create_raw_sock(struct net *net, struct socket *sock, int protocol,
756 int kern)
758 int rc = _cor_createsock(net, sock, protocol, kern, 1);
760 if (rc != 0)
761 return rc;
763 sock->ops = &cor_raw_proto_ops;
765 return 0;
768 MODULE_LICENSE("GPL");