2 * net/tipc/socket.c: TIPC socket API
4 * Copyright (c) 2001-2007, 2012 Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
40 #include <linux/export.h>
/* Pseudo socket states used in addition to the standard SS_* values */
#define SS_LISTENING	-1	/* socket is listening */
#define SS_READY	-2	/* socket is connectionless */

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
51 struct tipc_portid peer_name
;
52 unsigned int conn_timeout
;
/* Convert a generic sock pointer to its embedding tipc_sock / port */
#define tipc_sk(sk)      ((struct tipc_sock *)(sk))
#define tipc_sk_port(sk) (tipc_sk(sk)->p)

/* True when a receive can proceed: data queued, or connection torn down */
#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \
			     (sock->state == SS_DISCONNECTING))
61 static int backlog_rcv(struct sock
*sk
, struct sk_buff
*skb
);
62 static u32
dispatch(struct tipc_port
*tport
, struct sk_buff
*buf
);
63 static void wakeupdispatch(struct tipc_port
*tport
);
64 static void tipc_data_ready(struct sock
*sk
, int len
);
65 static void tipc_write_space(struct sock
*sk
);
66 static int release(struct socket
*sock
);
67 static int accept(struct socket
*sock
, struct socket
*new_sock
, int flags
);
69 static const struct proto_ops packet_ops
;
70 static const struct proto_ops stream_ops
;
71 static const struct proto_ops msg_ops
;
73 static struct proto tipc_proto
;
74 static struct proto tipc_proto_kern
;
76 static int sockets_enabled
;
79 * Revised TIPC socket locking policy:
81 * Most socket operations take the standard socket lock when they start
82 * and hold it until they finish (or until they need to sleep). Acquiring
83 * this lock grants the owner exclusive access to the fields of the socket
84 * data structures, with the exception of the backlog queue. A few socket
85 * operations can be done without taking the socket lock because they only
86 * read socket information that never changes during the life of the socket.
88 * Socket operations may acquire the lock for the associated TIPC port if they
89 * need to perform an operation on the port. If any routine needs to acquire
90 * both the socket lock and the port lock it must take the socket lock first
91 * to avoid the risk of deadlock.
93 * The dispatcher handling incoming messages cannot grab the socket lock in
94 * the standard fashion, since invoked it runs at the BH level and cannot block.
95 * Instead, it checks to see if the socket lock is currently owned by someone,
96 * and either handles the message itself or adds it to the socket's backlog
97 * queue; in the latter case the queued message is processed once the process
98 * owning the socket lock releases it.
100 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
101 * the problem of a blocked socket operation preventing any other operations
102 * from occurring. However, applications must be careful if they have
103 * multiple threads trying to send (or receive) on the same socket, as these
104 * operations might interfere with each other. For example, doing a connect
105 * and a receive at the same time might allow the receive to consume the
106 * ACK message meant for the connect. While additional work could be done
107 * to try and overcome this, it doesn't seem to be worthwhile at the present.
109 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
110 * that another operation that must be performed in a non-blocking manner is
111 * not delayed for very long because the lock has already been taken.
113 * NOTE: This code assumes that certain fields of a port/socket pair are
114 * constant over its lifetime; such fields can be examined without taking
115 * the socket lock and/or port lock, and do not need to be re-read even
116 * after resuming processing after waiting. These fields include:
118 * - pointer to socket sk structure (aka tipc_sock structure)
119 * - pointer to port structure
124 * advance_rx_queue - discard first buffer in socket receive queue
126 * Caller must hold socket lock
128 static void advance_rx_queue(struct sock
*sk
)
130 kfree_skb(__skb_dequeue(&sk
->sk_receive_queue
));
134 * reject_rx_queue - reject all buffers in socket receive queue
136 * Caller must hold socket lock
138 static void reject_rx_queue(struct sock
*sk
)
142 while ((buf
= __skb_dequeue(&sk
->sk_receive_queue
)))
143 tipc_reject_msg(buf
, TIPC_ERR_NO_PORT
);
147 * tipc_sk_create - create a TIPC socket
148 * @net: network namespace (must be default network)
149 * @sock: pre-allocated socket structure
150 * @protocol: protocol indicator (must be 0)
151 * @kern: caused by kernel or by userspace?
153 * This routine creates additional data structures used by the TIPC socket,
154 * initializes them, and links them together.
156 * Returns 0 on success, errno otherwise
158 static int tipc_sk_create(struct net
*net
, struct socket
*sock
, int protocol
,
161 const struct proto_ops
*ops
;
164 struct tipc_port
*tp_ptr
;
166 /* Validate arguments */
167 if (unlikely(protocol
!= 0))
168 return -EPROTONOSUPPORT
;
170 switch (sock
->type
) {
173 state
= SS_UNCONNECTED
;
177 state
= SS_UNCONNECTED
;
188 /* Allocate socket's protocol area */
190 sk
= sk_alloc(net
, AF_TIPC
, GFP_KERNEL
, &tipc_proto
);
192 sk
= sk_alloc(net
, AF_TIPC
, GFP_KERNEL
, &tipc_proto_kern
);
197 /* Allocate TIPC port for socket to use */
198 tp_ptr
= tipc_createport(sk
, &dispatch
, &wakeupdispatch
,
199 TIPC_LOW_IMPORTANCE
);
200 if (unlikely(!tp_ptr
)) {
205 /* Finish initializing socket data structures */
209 sock_init_data(sock
, sk
);
210 sk
->sk_backlog_rcv
= backlog_rcv
;
211 sk
->sk_rcvbuf
= sysctl_tipc_rmem
[1];
212 sk
->sk_data_ready
= tipc_data_ready
;
213 sk
->sk_write_space
= tipc_write_space
;
214 tipc_sk(sk
)->p
= tp_ptr
;
215 tipc_sk(sk
)->conn_timeout
= CONN_TIMEOUT_DEFAULT
;
217 spin_unlock_bh(tp_ptr
->lock
);
219 if (sock
->state
== SS_READY
) {
220 tipc_set_portunreturnable(tp_ptr
->ref
, 1);
221 if (sock
->type
== SOCK_DGRAM
)
222 tipc_set_portunreliable(tp_ptr
->ref
, 1);
229 * tipc_sock_create_local - create TIPC socket from inside TIPC module
230 * @type: socket type - SOCK_RDM or SOCK_SEQPACKET
232 * We cannot use sock_create_kern here because it bumps module user count.
233 * Since socket owner and creator is the same module we must make sure
234 * that module count remains zero for module local sockets, otherwise
235 * we cannot do rmmod.
237 * Returns 0 on success, errno otherwise
239 int tipc_sock_create_local(int type
, struct socket
**res
)
243 rc
= sock_create_lite(AF_TIPC
, type
, 0, res
);
245 pr_err("Failed to create kernel socket\n");
248 tipc_sk_create(&init_net
, *res
, 0, 1);
254 * tipc_sock_release_local - release socket created by tipc_sock_create_local
255 * @sock: the socket to be released.
257 * Module reference count is not incremented when such sockets are created,
258 * so we must keep it from being decremented when they are released.
260 void tipc_sock_release_local(struct socket
*sock
)
268 * tipc_sock_accept_local - accept a connection on a socket created
269 * with tipc_sock_create_local. Use this function to avoid that
270 * module reference count is inadvertently incremented.
272 * @sock: the accepting socket
273 * @newsock: reference to the new socket to be created
274 * @flags: socket flags
277 int tipc_sock_accept_local(struct socket
*sock
, struct socket
**newsock
,
280 struct sock
*sk
= sock
->sk
;
283 ret
= sock_create_lite(sk
->sk_family
, sk
->sk_type
,
284 sk
->sk_protocol
, newsock
);
288 ret
= accept(sock
, *newsock
, flags
);
290 sock_release(*newsock
);
293 (*newsock
)->ops
= sock
->ops
;
298 * release - destroy a TIPC socket
299 * @sock: socket to destroy
301 * This routine cleans up any messages that are still queued on the socket.
302 * For DGRAM and RDM socket types, all queued messages are rejected.
303 * For SEQPACKET and STREAM socket types, the first message is rejected
304 * and any others are discarded. (If the first message on a STREAM socket
305 * is partially-read, it is discarded and the next one is rejected instead.)
307 * NOTE: Rejected messages are not necessarily returned to the sender! They
308 * are returned or discarded according to the "destination droppable" setting
309 * specified for the message by the sender.
311 * Returns 0 on success, errno otherwise
313 static int release(struct socket
*sock
)
315 struct sock
*sk
= sock
->sk
;
316 struct tipc_port
*tport
;
321 * Exit if socket isn't fully initialized (occurs when a failed accept()
322 * releases a pre-allocated child socket that was never used)
327 tport
= tipc_sk_port(sk
);
331 * Reject all unreceived messages, except on an active connection
332 * (which disconnects locally & sends a 'FIN+' to peer)
334 while (sock
->state
!= SS_DISCONNECTING
) {
335 buf
= __skb_dequeue(&sk
->sk_receive_queue
);
338 if (TIPC_SKB_CB(buf
)->handle
!= NULL
)
341 if ((sock
->state
== SS_CONNECTING
) ||
342 (sock
->state
== SS_CONNECTED
)) {
343 sock
->state
= SS_DISCONNECTING
;
344 tipc_disconnect(tport
->ref
);
346 tipc_reject_msg(buf
, TIPC_ERR_NO_PORT
);
351 * Delete TIPC port; this ensures no more messages are queued
352 * (also disconnects an active connection & sends a 'FIN-' to peer)
354 res
= tipc_deleteport(tport
);
356 /* Discard any remaining (connection-based) messages in receive queue */
357 __skb_queue_purge(&sk
->sk_receive_queue
);
359 /* Reject any messages that accumulated in backlog queue */
360 sock
->state
= SS_DISCONNECTING
;
370 * bind - associate or disassociate TIPC name(s) with a socket
371 * @sock: socket structure
372 * @uaddr: socket address describing name(s) and desired operation
373 * @uaddr_len: size of socket address data structure
375 * Name and name sequence binding is indicated using a positive scope value;
376 * a negative scope value unbinds the specified name. Specifying no name
377 * (i.e. a socket address length of 0) unbinds all names from the socket.
379 * Returns 0 on success, errno otherwise
381 * NOTE: This routine doesn't need to take the socket lock since it doesn't
382 * access any non-constant socket information.
384 static int bind(struct socket
*sock
, struct sockaddr
*uaddr
, int uaddr_len
)
386 struct sock
*sk
= sock
->sk
;
387 struct sockaddr_tipc
*addr
= (struct sockaddr_tipc
*)uaddr
;
388 struct tipc_port
*tport
= tipc_sk_port(sock
->sk
);
392 if (unlikely(!uaddr_len
)) {
393 res
= tipc_withdraw(tport
, 0, NULL
);
397 if (uaddr_len
< sizeof(struct sockaddr_tipc
)) {
401 if (addr
->family
!= AF_TIPC
) {
406 if (addr
->addrtype
== TIPC_ADDR_NAME
)
407 addr
->addr
.nameseq
.upper
= addr
->addr
.nameseq
.lower
;
408 else if (addr
->addrtype
!= TIPC_ADDR_NAMESEQ
) {
413 if ((addr
->addr
.nameseq
.type
< TIPC_RESERVED_TYPES
) &&
414 (addr
->addr
.nameseq
.type
!= TIPC_TOP_SRV
) &&
415 (addr
->addr
.nameseq
.type
!= TIPC_CFG_SRV
)) {
420 res
= (addr
->scope
> 0) ?
421 tipc_publish(tport
, addr
->scope
, &addr
->addr
.nameseq
) :
422 tipc_withdraw(tport
, -addr
->scope
, &addr
->addr
.nameseq
);
429 * get_name - get port ID of socket or peer socket
430 * @sock: socket structure
431 * @uaddr: area for returned socket address
432 * @uaddr_len: area for returned length of socket address
433 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
435 * Returns 0 on success, errno otherwise
437 * NOTE: This routine doesn't need to take the socket lock since it only
438 * accesses socket information that is unchanging (or which changes in
439 * a completely predictable manner).
441 static int get_name(struct socket
*sock
, struct sockaddr
*uaddr
,
442 int *uaddr_len
, int peer
)
444 struct sockaddr_tipc
*addr
= (struct sockaddr_tipc
*)uaddr
;
445 struct tipc_sock
*tsock
= tipc_sk(sock
->sk
);
447 memset(addr
, 0, sizeof(*addr
));
449 if ((sock
->state
!= SS_CONNECTED
) &&
450 ((peer
!= 2) || (sock
->state
!= SS_DISCONNECTING
)))
452 addr
->addr
.id
.ref
= tsock
->peer_name
.ref
;
453 addr
->addr
.id
.node
= tsock
->peer_name
.node
;
455 addr
->addr
.id
.ref
= tsock
->p
->ref
;
456 addr
->addr
.id
.node
= tipc_own_addr
;
459 *uaddr_len
= sizeof(*addr
);
460 addr
->addrtype
= TIPC_ADDR_ID
;
461 addr
->family
= AF_TIPC
;
463 addr
->addr
.name
.domain
= 0;
469 * poll - read and possibly block on pollmask
470 * @file: file structure associated with the socket
471 * @sock: socket for which to calculate the poll bits
474 * Returns pollmask value
477 * It appears that the usual socket locking mechanisms are not useful here
478 * since the pollmask info is potentially out-of-date the moment this routine
479 * exits. TCP and other protocols seem to rely on higher level poll routines
480 * to handle any preventable race conditions, so TIPC will do the same ...
482 * TIPC sets the returned events as follows:
484 * socket state flags set
485 * ------------ ---------
486 * unconnected no read flags
487 * POLLOUT if port is not congested
489 * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
492 * connected POLLIN/POLLRDNORM if data in rx queue
493 * POLLOUT if port is not congested
495 * disconnecting POLLIN/POLLRDNORM/POLLHUP
498 * listening POLLIN if SYN in rx queue
501 * ready POLLIN/POLLRDNORM if data in rx queue
502 * [connectionless] POLLOUT (since port cannot be congested)
504 * IMPORTANT: The fact that a read or write operation is indicated does NOT
505 * imply that the operation will succeed, merely that it should be performed
506 * and will not block.
508 static unsigned int poll(struct file
*file
, struct socket
*sock
,
511 struct sock
*sk
= sock
->sk
;
514 sock_poll_wait(file
, sk_sleep(sk
), wait
);
516 switch ((int)sock
->state
) {
518 if (!tipc_sk_port(sk
)->congested
)
523 if (!tipc_sk_port(sk
)->congested
)
528 if (!skb_queue_empty(&sk
->sk_receive_queue
))
529 mask
|= (POLLIN
| POLLRDNORM
);
531 case SS_DISCONNECTING
:
532 mask
= (POLLIN
| POLLRDNORM
| POLLHUP
);
540 * dest_name_check - verify user is permitted to send to specified port name
541 * @dest: destination address
542 * @m: descriptor for message to be sent
544 * Prevents restricted configuration commands from being issued by
545 * unauthorized users.
547 * Returns 0 if permission is granted, otherwise errno
549 static int dest_name_check(struct sockaddr_tipc
*dest
, struct msghdr
*m
)
551 struct tipc_cfg_msg_hdr hdr
;
553 if (likely(dest
->addr
.name
.name
.type
>= TIPC_RESERVED_TYPES
))
555 if (likely(dest
->addr
.name
.name
.type
== TIPC_TOP_SRV
))
557 if (likely(dest
->addr
.name
.name
.type
!= TIPC_CFG_SRV
))
560 if (!m
->msg_iovlen
|| (m
->msg_iov
[0].iov_len
< sizeof(hdr
)))
562 if (copy_from_user(&hdr
, m
->msg_iov
[0].iov_base
, sizeof(hdr
)))
564 if ((ntohs(hdr
.tcm_type
) & 0xC000) && (!capable(CAP_NET_ADMIN
)))
570 static int tipc_wait_for_sndmsg(struct socket
*sock
, long *timeo_p
)
572 struct sock
*sk
= sock
->sk
;
573 struct tipc_port
*tport
= tipc_sk_port(sk
);
578 int err
= sock_error(sk
);
581 if (sock
->state
== SS_DISCONNECTING
)
585 if (signal_pending(current
))
586 return sock_intr_errno(*timeo_p
);
588 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
589 done
= sk_wait_event(sk
, timeo_p
, !tport
->congested
);
590 finish_wait(sk_sleep(sk
), &wait
);
596 * send_msg - send message in connectionless manner
597 * @iocb: if NULL, indicates that socket lock is already held
598 * @sock: socket structure
599 * @m: message to send
600 * @total_len: length of message
602 * Message must have an destination specified explicitly.
603 * Used for SOCK_RDM and SOCK_DGRAM messages,
604 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
605 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
607 * Returns the number of bytes sent on success, or errno otherwise
609 static int send_msg(struct kiocb
*iocb
, struct socket
*sock
,
610 struct msghdr
*m
, size_t total_len
)
612 struct sock
*sk
= sock
->sk
;
613 struct tipc_port
*tport
= tipc_sk_port(sk
);
614 struct sockaddr_tipc
*dest
= (struct sockaddr_tipc
*)m
->msg_name
;
620 return -EDESTADDRREQ
;
621 if (unlikely((m
->msg_namelen
< sizeof(*dest
)) ||
622 (dest
->family
!= AF_TIPC
)))
624 if (total_len
> TIPC_MAX_USER_MSG_SIZE
)
630 needs_conn
= (sock
->state
!= SS_READY
);
631 if (unlikely(needs_conn
)) {
632 if (sock
->state
== SS_LISTENING
) {
636 if (sock
->state
!= SS_UNCONNECTED
) {
640 if (tport
->published
) {
644 if (dest
->addrtype
== TIPC_ADDR_NAME
) {
645 tport
->conn_type
= dest
->addr
.name
.name
.type
;
646 tport
->conn_instance
= dest
->addr
.name
.name
.instance
;
649 /* Abort any pending connection attempts (very unlikely) */
653 timeo
= sock_sndtimeo(sk
, m
->msg_flags
& MSG_DONTWAIT
);
655 if (dest
->addrtype
== TIPC_ADDR_NAME
) {
656 res
= dest_name_check(dest
, m
);
659 res
= tipc_send2name(tport
->ref
,
660 &dest
->addr
.name
.name
,
661 dest
->addr
.name
.domain
,
664 } else if (dest
->addrtype
== TIPC_ADDR_ID
) {
665 res
= tipc_send2port(tport
->ref
,
669 } else if (dest
->addrtype
== TIPC_ADDR_MCAST
) {
674 res
= dest_name_check(dest
, m
);
677 res
= tipc_multicast(tport
->ref
,
682 if (likely(res
!= -ELINKCONG
)) {
683 if (needs_conn
&& (res
>= 0))
684 sock
->state
= SS_CONNECTING
;
687 res
= tipc_wait_for_sndmsg(sock
, &timeo
);
698 static int tipc_wait_for_sndpkt(struct socket
*sock
, long *timeo_p
)
700 struct sock
*sk
= sock
->sk
;
701 struct tipc_port
*tport
= tipc_sk_port(sk
);
706 int err
= sock_error(sk
);
709 if (sock
->state
== SS_DISCONNECTING
)
711 else if (sock
->state
!= SS_CONNECTED
)
715 if (signal_pending(current
))
716 return sock_intr_errno(*timeo_p
);
718 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
719 done
= sk_wait_event(sk
, timeo_p
,
720 (!tport
->congested
|| !tport
->connected
));
721 finish_wait(sk_sleep(sk
), &wait
);
727 * send_packet - send a connection-oriented message
728 * @iocb: if NULL, indicates that socket lock is already held
729 * @sock: socket structure
730 * @m: message to send
731 * @total_len: length of message
733 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
735 * Returns the number of bytes sent on success, or errno otherwise
737 static int send_packet(struct kiocb
*iocb
, struct socket
*sock
,
738 struct msghdr
*m
, size_t total_len
)
740 struct sock
*sk
= sock
->sk
;
741 struct tipc_port
*tport
= tipc_sk_port(sk
);
742 struct sockaddr_tipc
*dest
= (struct sockaddr_tipc
*)m
->msg_name
;
746 /* Handle implied connection establishment */
748 return send_msg(iocb
, sock
, m
, total_len
);
750 if (total_len
> TIPC_MAX_USER_MSG_SIZE
)
756 if (unlikely(sock
->state
!= SS_CONNECTED
)) {
757 if (sock
->state
== SS_DISCONNECTING
)
764 timeo
= sock_sndtimeo(sk
, m
->msg_flags
& MSG_DONTWAIT
);
766 res
= tipc_send(tport
->ref
, m
->msg_iov
, total_len
);
767 if (likely(res
!= -ELINKCONG
))
769 res
= tipc_wait_for_sndpkt(sock
, &timeo
);
780 * send_stream - send stream-oriented data
782 * @sock: socket structure
784 * @total_len: total length of data to be sent
786 * Used for SOCK_STREAM data.
788 * Returns the number of bytes sent on success (or partial success),
789 * or errno if no data sent
791 static int send_stream(struct kiocb
*iocb
, struct socket
*sock
,
792 struct msghdr
*m
, size_t total_len
)
794 struct sock
*sk
= sock
->sk
;
795 struct tipc_port
*tport
= tipc_sk_port(sk
);
796 struct msghdr my_msg
;
798 struct iovec
*curr_iov
;
800 char __user
*curr_start
;
809 /* Handle special cases where there is no connection */
810 if (unlikely(sock
->state
!= SS_CONNECTED
)) {
811 if (sock
->state
== SS_UNCONNECTED
)
812 res
= send_packet(NULL
, sock
, m
, total_len
);
814 res
= sock
->state
== SS_DISCONNECTING
? -EPIPE
: -ENOTCONN
;
818 if (unlikely(m
->msg_name
)) {
823 if (total_len
> (unsigned int)INT_MAX
) {
829 * Send each iovec entry using one or more messages
831 * Note: This algorithm is good for the most likely case
832 * (i.e. one large iovec entry), but could be improved to pass sets
833 * of small iovec entries into send_packet().
835 curr_iov
= m
->msg_iov
;
836 curr_iovlen
= m
->msg_iovlen
;
837 my_msg
.msg_iov
= &my_iov
;
838 my_msg
.msg_iovlen
= 1;
839 my_msg
.msg_flags
= m
->msg_flags
;
840 my_msg
.msg_name
= NULL
;
843 hdr_size
= msg_hdr_sz(&tport
->phdr
);
845 while (curr_iovlen
--) {
846 curr_start
= curr_iov
->iov_base
;
847 curr_left
= curr_iov
->iov_len
;
850 bytes_to_send
= tport
->max_pkt
- hdr_size
;
851 if (bytes_to_send
> TIPC_MAX_USER_MSG_SIZE
)
852 bytes_to_send
= TIPC_MAX_USER_MSG_SIZE
;
853 if (curr_left
< bytes_to_send
)
854 bytes_to_send
= curr_left
;
855 my_iov
.iov_base
= curr_start
;
856 my_iov
.iov_len
= bytes_to_send
;
857 res
= send_packet(NULL
, sock
, &my_msg
, bytes_to_send
);
863 curr_left
-= bytes_to_send
;
864 curr_start
+= bytes_to_send
;
865 bytes_sent
+= bytes_to_send
;
877 * auto_connect - complete connection setup to a remote port
878 * @sock: socket structure
879 * @msg: peer's response message
881 * Returns 0 on success, errno otherwise
883 static int auto_connect(struct socket
*sock
, struct tipc_msg
*msg
)
885 struct tipc_sock
*tsock
= tipc_sk(sock
->sk
);
886 struct tipc_port
*p_ptr
;
888 tsock
->peer_name
.ref
= msg_origport(msg
);
889 tsock
->peer_name
.node
= msg_orignode(msg
);
890 p_ptr
= tipc_port_deref(tsock
->p
->ref
);
894 __tipc_connect(tsock
->p
->ref
, p_ptr
, &tsock
->peer_name
);
896 if (msg_importance(msg
) > TIPC_CRITICAL_IMPORTANCE
)
898 msg_set_importance(&p_ptr
->phdr
, (u32
)msg_importance(msg
));
899 sock
->state
= SS_CONNECTED
;
904 * set_orig_addr - capture sender's address for received message
905 * @m: descriptor for message info
906 * @msg: received message header
908 * Note: Address is not captured if not requested by receiver.
910 static void set_orig_addr(struct msghdr
*m
, struct tipc_msg
*msg
)
912 struct sockaddr_tipc
*addr
= (struct sockaddr_tipc
*)m
->msg_name
;
915 addr
->family
= AF_TIPC
;
916 addr
->addrtype
= TIPC_ADDR_ID
;
917 memset(&addr
->addr
, 0, sizeof(addr
->addr
));
918 addr
->addr
.id
.ref
= msg_origport(msg
);
919 addr
->addr
.id
.node
= msg_orignode(msg
);
920 addr
->addr
.name
.domain
= 0; /* could leave uninitialized */
921 addr
->scope
= 0; /* could leave uninitialized */
922 m
->msg_namelen
= sizeof(struct sockaddr_tipc
);
927 * anc_data_recv - optionally capture ancillary data for received message
928 * @m: descriptor for message info
929 * @msg: received message header
930 * @tport: TIPC port associated with message
932 * Note: Ancillary data is not captured if not requested by receiver.
934 * Returns 0 if successful, otherwise errno
936 static int anc_data_recv(struct msghdr
*m
, struct tipc_msg
*msg
,
937 struct tipc_port
*tport
)
945 if (likely(m
->msg_controllen
== 0))
948 /* Optionally capture errored message object(s) */
949 err
= msg
? msg_errcode(msg
) : 0;
952 anc_data
[1] = msg_data_sz(msg
);
953 res
= put_cmsg(m
, SOL_TIPC
, TIPC_ERRINFO
, 8, anc_data
);
957 res
= put_cmsg(m
, SOL_TIPC
, TIPC_RETDATA
, anc_data
[1],
964 /* Optionally capture message destination object */
965 dest_type
= msg
? msg_type(msg
) : TIPC_DIRECT_MSG
;
969 anc_data
[0] = msg_nametype(msg
);
970 anc_data
[1] = msg_namelower(msg
);
971 anc_data
[2] = msg_namelower(msg
);
975 anc_data
[0] = msg_nametype(msg
);
976 anc_data
[1] = msg_namelower(msg
);
977 anc_data
[2] = msg_nameupper(msg
);
980 has_name
= (tport
->conn_type
!= 0);
981 anc_data
[0] = tport
->conn_type
;
982 anc_data
[1] = tport
->conn_instance
;
983 anc_data
[2] = tport
->conn_instance
;
989 res
= put_cmsg(m
, SOL_TIPC
, TIPC_DESTNAME
, 12, anc_data
);
998 * recv_msg - receive packet-oriented message
1000 * @m: descriptor for message info
1001 * @buf_len: total size of user buffer area
1002 * @flags: receive flags
1004 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1005 * If the complete message doesn't fit in user area, truncate it.
1007 * Returns size of returned message data, errno otherwise
1009 static int recv_msg(struct kiocb
*iocb
, struct socket
*sock
,
1010 struct msghdr
*m
, size_t buf_len
, int flags
)
1012 struct sock
*sk
= sock
->sk
;
1013 struct tipc_port
*tport
= tipc_sk_port(sk
);
1014 struct sk_buff
*buf
;
1015 struct tipc_msg
*msg
;
1021 /* Catch invalid receive requests */
1022 if (unlikely(!buf_len
))
1027 if (unlikely(sock
->state
== SS_UNCONNECTED
)) {
1032 timeout
= sock_rcvtimeo(sk
, flags
& MSG_DONTWAIT
);
1035 /* Look for a message in receive queue; wait if necessary */
1036 while (skb_queue_empty(&sk
->sk_receive_queue
)) {
1037 if (sock
->state
== SS_DISCONNECTING
) {
1041 if (timeout
<= 0L) {
1042 res
= timeout
? timeout
: -EWOULDBLOCK
;
1046 timeout
= wait_event_interruptible_timeout(*sk_sleep(sk
),
1047 tipc_rx_ready(sock
),
1052 /* Look at first message in receive queue */
1053 buf
= skb_peek(&sk
->sk_receive_queue
);
1055 sz
= msg_data_sz(msg
);
1056 err
= msg_errcode(msg
);
1058 /* Discard an empty non-errored message & try again */
1059 if ((!sz
) && (!err
)) {
1060 advance_rx_queue(sk
);
1064 /* Capture sender's address (optional) */
1065 set_orig_addr(m
, msg
);
1067 /* Capture ancillary data (optional) */
1068 res
= anc_data_recv(m
, msg
, tport
);
1072 /* Capture message data (if valid) & compute return value (always) */
1074 if (unlikely(buf_len
< sz
)) {
1076 m
->msg_flags
|= MSG_TRUNC
;
1078 res
= skb_copy_datagram_iovec(buf
, msg_hdr_sz(msg
),
1084 if ((sock
->state
== SS_READY
) ||
1085 ((err
== TIPC_CONN_SHUTDOWN
) || m
->msg_control
))
1091 /* Consume received message (optional) */
1092 if (likely(!(flags
& MSG_PEEK
))) {
1093 if ((sock
->state
!= SS_READY
) &&
1094 (++tport
->conn_unacked
>= TIPC_FLOW_CONTROL_WIN
))
1095 tipc_acknowledge(tport
->ref
, tport
->conn_unacked
);
1096 advance_rx_queue(sk
);
1104 * recv_stream - receive stream-oriented data
1106 * @m: descriptor for message info
1107 * @buf_len: total size of user buffer area
1108 * @flags: receive flags
1110 * Used for SOCK_STREAM messages only. If not enough data is available
1111 * will optionally wait for more; never truncates data.
1113 * Returns size of returned message data, errno otherwise
1115 static int recv_stream(struct kiocb
*iocb
, struct socket
*sock
,
1116 struct msghdr
*m
, size_t buf_len
, int flags
)
1118 struct sock
*sk
= sock
->sk
;
1119 struct tipc_port
*tport
= tipc_sk_port(sk
);
1120 struct sk_buff
*buf
;
1121 struct tipc_msg
*msg
;
1124 int sz_to_copy
, target
, needed
;
1129 /* Catch invalid receive attempts */
1130 if (unlikely(!buf_len
))
1135 if (unlikely((sock
->state
== SS_UNCONNECTED
))) {
1140 target
= sock_rcvlowat(sk
, flags
& MSG_WAITALL
, buf_len
);
1141 timeout
= sock_rcvtimeo(sk
, flags
& MSG_DONTWAIT
);
1144 /* Look for a message in receive queue; wait if necessary */
1145 while (skb_queue_empty(&sk
->sk_receive_queue
)) {
1146 if (sock
->state
== SS_DISCONNECTING
) {
1150 if (timeout
<= 0L) {
1151 res
= timeout
? timeout
: -EWOULDBLOCK
;
1155 timeout
= wait_event_interruptible_timeout(*sk_sleep(sk
),
1156 tipc_rx_ready(sock
),
1161 /* Look at first message in receive queue */
1162 buf
= skb_peek(&sk
->sk_receive_queue
);
1164 sz
= msg_data_sz(msg
);
1165 err
= msg_errcode(msg
);
1167 /* Discard an empty non-errored message & try again */
1168 if ((!sz
) && (!err
)) {
1169 advance_rx_queue(sk
);
1173 /* Optionally capture sender's address & ancillary data of first msg */
1174 if (sz_copied
== 0) {
1175 set_orig_addr(m
, msg
);
1176 res
= anc_data_recv(m
, msg
, tport
);
1181 /* Capture message data (if valid) & compute return value (always) */
1183 u32 offset
= (u32
)(unsigned long)(TIPC_SKB_CB(buf
)->handle
);
1186 needed
= (buf_len
- sz_copied
);
1187 sz_to_copy
= (sz
<= needed
) ? sz
: needed
;
1189 res
= skb_copy_datagram_iovec(buf
, msg_hdr_sz(msg
) + offset
,
1190 m
->msg_iov
, sz_to_copy
);
1194 sz_copied
+= sz_to_copy
;
1196 if (sz_to_copy
< sz
) {
1197 if (!(flags
& MSG_PEEK
))
1198 TIPC_SKB_CB(buf
)->handle
=
1199 (void *)(unsigned long)(offset
+ sz_to_copy
);
1204 goto exit
; /* can't add error msg to valid data */
1206 if ((err
== TIPC_CONN_SHUTDOWN
) || m
->msg_control
)
1212 /* Consume received message (optional) */
1213 if (likely(!(flags
& MSG_PEEK
))) {
1214 if (unlikely(++tport
->conn_unacked
>= TIPC_FLOW_CONTROL_WIN
))
1215 tipc_acknowledge(tport
->ref
, tport
->conn_unacked
);
1216 advance_rx_queue(sk
);
1219 /* Loop around if more data is required */
1220 if ((sz_copied
< buf_len
) && /* didn't get all requested data */
1221 (!skb_queue_empty(&sk
->sk_receive_queue
) ||
1222 (sz_copied
< target
)) && /* and more is ready or required */
1223 (!(flags
& MSG_PEEK
)) && /* and aren't just peeking at data */
1224 (!err
)) /* and haven't reached a FIN */
1229 return sz_copied
? sz_copied
: res
;
1233 * tipc_write_space - wake up thread if port congestion is released
1236 static void tipc_write_space(struct sock
*sk
)
1238 struct socket_wq
*wq
;
1241 wq
= rcu_dereference(sk
->sk_wq
);
1242 if (wq_has_sleeper(wq
))
1243 wake_up_interruptible_sync_poll(&wq
->wait
, POLLOUT
|
1244 POLLWRNORM
| POLLWRBAND
);
1249 * tipc_data_ready - wake up threads to indicate messages have been received
1251 * @len: the length of messages
1253 static void tipc_data_ready(struct sock
*sk
, int len
)
1255 struct socket_wq
*wq
;
1258 wq
= rcu_dereference(sk
->sk_wq
);
1259 if (wq_has_sleeper(wq
))
1260 wake_up_interruptible_sync_poll(&wq
->wait
, POLLIN
|
1261 POLLRDNORM
| POLLRDBAND
);
/**
 * filter_connect - Handle all incoming messages for a connection-based socket
 * @tsock: TIPC socket
 * @buf: pointer to message buffer; set to NULL if buffer is consumed
 *
 * Returns TIPC error status code and socket error status code
 * once it encounters some errors
 *
 * NOTE(review): several structural lines (case labels, breaks) were missing
 * from the extracted source; restored from the upstream tree — verify.
 */
static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
{
	struct socket *sock = tsock->sk.sk_socket;
	struct tipc_msg *msg = buf_msg(*buf);
	struct sock *sk = &tsock->sk;
	u32 retval = TIPC_ERR_NO_PORT;
	int res;

	if (msg_mcast(msg))
		return retval;

	switch ((int)sock->state) {
	case SS_CONNECTED:
		/* Accept only connection-based messages sent by peer */
		if (msg_connected(msg) && tipc_port_peer_msg(tsock->p, msg)) {
			if (unlikely(msg_errcode(msg))) {
				sock->state = SS_DISCONNECTING;
				__tipc_disconnect(tsock->p);
			}
			retval = TIPC_OK;
		}
		break;
	case SS_CONNECTING:
		/* Accept only ACK or NACK message */
		if (unlikely(msg_errcode(msg))) {
			sock->state = SS_DISCONNECTING;
			sk->sk_err = ECONNREFUSED;
			retval = TIPC_OK;
			break;
		}

		if (unlikely(!msg_connected(msg)))
			break;

		res = auto_connect(sock, msg);
		if (res) {
			sock->state = SS_DISCONNECTING;
			sk->sk_err = -res;
			retval = TIPC_OK;
			break;
		}

		/* If an incoming message is an 'ACK-', it should be
		 * discarded here because it doesn't contain useful
		 * data. In addition, we should try to wake up
		 * connect() routine if sleeping.
		 */
		if (msg_data_sz(msg) == 0) {
			kfree_skb(*buf);
			*buf = NULL;
			if (waitqueue_active(sk_sleep(sk)))
				wake_up_interruptible(sk_sleep(sk));
		}
		retval = TIPC_OK;
		break;
	case SS_LISTENING:
	case SS_UNCONNECTED:
		/* Accept only SYN message */
		if (!msg_connected(msg) && !(msg_errcode(msg)))
			retval = TIPC_OK;
		break;
	case SS_DISCONNECTING:
		break;
	default:
		pr_err("Unknown socket state %u\n", sock->state);
	}
	return retval;
}
1343 * rcvbuf_limit - get proper overload limit of socket receive queue
1347 * For all connection oriented messages, irrespective of importance,
1348 * the default overload value (i.e. 67MB) is set as limit.
1350 * For all connectionless messages, by default new queue limits are
1353 * TIPC_LOW_IMPORTANCE (4 MB)
1354 * TIPC_MEDIUM_IMPORTANCE (8 MB)
1355 * TIPC_HIGH_IMPORTANCE (16 MB)
1356 * TIPC_CRITICAL_IMPORTANCE (32 MB)
1358 * Returns overload limit according to corresponding message importance
1360 static unsigned int rcvbuf_limit(struct sock
*sk
, struct sk_buff
*buf
)
1362 struct tipc_msg
*msg
= buf_msg(buf
);
1364 if (msg_connected(msg
))
1365 return sysctl_tipc_rmem
[2];
1367 return sk
->sk_rcvbuf
>> TIPC_CRITICAL_IMPORTANCE
<<
1368 msg_importance(msg
);
1372 * filter_rcv - validate incoming message
1376 * Enqueues message on receive queue if acceptable; optionally handles
1377 * disconnect indication for a connected socket.
1379 * Called with socket lock already taken; port lock may also be taken.
1381 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1383 static u32
filter_rcv(struct sock
*sk
, struct sk_buff
*buf
)
1385 struct socket
*sock
= sk
->sk_socket
;
1386 struct tipc_msg
*msg
= buf_msg(buf
);
1387 unsigned int limit
= rcvbuf_limit(sk
, buf
);
1390 /* Reject message if it is wrong sort of message for socket */
1391 if (msg_type(msg
) > TIPC_DIRECT_MSG
)
1392 return TIPC_ERR_NO_PORT
;
1394 if (sock
->state
== SS_READY
) {
1395 if (msg_connected(msg
))
1396 return TIPC_ERR_NO_PORT
;
1398 res
= filter_connect(tipc_sk(sk
), &buf
);
1399 if (res
!= TIPC_OK
|| buf
== NULL
)
1403 /* Reject message if there isn't room to queue it */
1404 if (sk_rmem_alloc_get(sk
) + buf
->truesize
>= limit
)
1405 return TIPC_ERR_OVERLOAD
;
1407 /* Enqueue message */
1408 TIPC_SKB_CB(buf
)->handle
= NULL
;
1409 __skb_queue_tail(&sk
->sk_receive_queue
, buf
);
1410 skb_set_owner_r(buf
, sk
);
1412 sk
->sk_data_ready(sk
, 0);
1417 * backlog_rcv - handle incoming message from backlog queue
1421 * Caller must hold socket lock, but not port lock.
1425 static int backlog_rcv(struct sock
*sk
, struct sk_buff
*buf
)
1429 res
= filter_rcv(sk
, buf
);
1431 tipc_reject_msg(buf
, res
);
1436 * dispatch - handle incoming message
1437 * @tport: TIPC port that received message
1440 * Called with port lock already taken.
1442 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1444 static u32
dispatch(struct tipc_port
*tport
, struct sk_buff
*buf
)
1446 struct sock
*sk
= tport
->sk
;
1450 * Process message if socket is unlocked; otherwise add to backlog queue
1452 * This code is based on sk_receive_skb(), but must be distinct from it
1453 * since a TIPC-specific filter/reject mechanism is utilized
1456 if (!sock_owned_by_user(sk
)) {
1457 res
= filter_rcv(sk
, buf
);
1459 if (sk_add_backlog(sk
, buf
, rcvbuf_limit(sk
, buf
)))
1460 res
= TIPC_ERR_OVERLOAD
;
1470 * wakeupdispatch - wake up port after congestion
1471 * @tport: port to wakeup
1473 * Called with port lock already taken.
1475 static void wakeupdispatch(struct tipc_port
*tport
)
1477 struct sock
*sk
= tport
->sk
;
1479 sk
->sk_write_space(sk
);
1482 static int tipc_wait_for_connect(struct socket
*sock
, long *timeo_p
)
1484 struct sock
*sk
= sock
->sk
;
1489 int err
= sock_error(sk
);
1494 if (signal_pending(current
))
1495 return sock_intr_errno(*timeo_p
);
1497 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
1498 done
= sk_wait_event(sk
, timeo_p
, sock
->state
!= SS_CONNECTING
);
1499 finish_wait(sk_sleep(sk
), &wait
);
1505 * connect - establish a connection to another TIPC port
1506 * @sock: socket structure
1507 * @dest: socket address for destination port
1508 * @destlen: size of socket address data structure
1509 * @flags: file-related flags associated with socket
1511 * Returns 0 on success, errno otherwise
1513 static int connect(struct socket
*sock
, struct sockaddr
*dest
, int destlen
,
1516 struct sock
*sk
= sock
->sk
;
1517 struct sockaddr_tipc
*dst
= (struct sockaddr_tipc
*)dest
;
1518 struct msghdr m
= {NULL
,};
1519 long timeout
= (flags
& O_NONBLOCK
) ? 0 : tipc_sk(sk
)->conn_timeout
;
1520 socket_state previous
;
1525 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
1526 if (sock
->state
== SS_READY
) {
1532 * Reject connection attempt using multicast address
1534 * Note: send_msg() validates the rest of the address fields,
1535 * so there's no need to do it here
1537 if (dst
->addrtype
== TIPC_ADDR_MCAST
) {
1542 previous
= sock
->state
;
1543 switch (sock
->state
) {
1544 case SS_UNCONNECTED
:
1545 /* Send a 'SYN-' to destination */
1547 m
.msg_namelen
= destlen
;
1549 /* If connect is in non-blocking case, set MSG_DONTWAIT to
1550 * indicate send_msg() is never blocked.
1553 m
.msg_flags
= MSG_DONTWAIT
;
1555 res
= send_msg(NULL
, sock
, &m
, 0);
1556 if ((res
< 0) && (res
!= -EWOULDBLOCK
))
1559 /* Just entered SS_CONNECTING state; the only
1560 * difference is that return value in non-blocking
1561 * case is EINPROGRESS, rather than EALREADY.
1565 if (previous
== SS_CONNECTING
)
1569 timeout
= msecs_to_jiffies(timeout
);
1570 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1571 res
= tipc_wait_for_connect(sock
, &timeout
);
1586 * listen - allow socket to listen for incoming connections
1587 * @sock: socket structure
1590 * Returns 0 on success, errno otherwise
1592 static int listen(struct socket
*sock
, int len
)
1594 struct sock
*sk
= sock
->sk
;
1599 if (sock
->state
!= SS_UNCONNECTED
)
1602 sock
->state
= SS_LISTENING
;
1610 static int tipc_wait_for_accept(struct socket
*sock
, long timeo
)
1612 struct sock
*sk
= sock
->sk
;
1616 /* True wake-one mechanism for incoming connections: only
1617 * one process gets woken up, not the 'whole herd'.
1618 * Since we do not 'race & poll' for established sockets
1619 * anymore, the common case will execute the loop only once.
1622 prepare_to_wait_exclusive(sk_sleep(sk
), &wait
,
1623 TASK_INTERRUPTIBLE
);
1624 if (skb_queue_empty(&sk
->sk_receive_queue
)) {
1626 timeo
= schedule_timeout(timeo
);
1630 if (!skb_queue_empty(&sk
->sk_receive_queue
))
1633 if (sock
->state
!= SS_LISTENING
)
1635 err
= sock_intr_errno(timeo
);
1636 if (signal_pending(current
))
1642 finish_wait(sk_sleep(sk
), &wait
);
1647 * accept - wait for connection request
1648 * @sock: listening socket
1649 * @newsock: new socket that is to be connected
1650 * @flags: file-related flags associated with socket
1652 * Returns 0 on success, errno otherwise
1654 static int accept(struct socket
*sock
, struct socket
*new_sock
, int flags
)
1656 struct sock
*new_sk
, *sk
= sock
->sk
;
1657 struct sk_buff
*buf
;
1658 struct tipc_sock
*new_tsock
;
1659 struct tipc_port
*new_tport
;
1660 struct tipc_msg
*msg
;
1667 if (sock
->state
!= SS_LISTENING
) {
1672 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1673 res
= tipc_wait_for_accept(sock
, timeo
);
1677 buf
= skb_peek(&sk
->sk_receive_queue
);
1679 res
= tipc_sk_create(sock_net(sock
->sk
), new_sock
, 0, 1);
1683 new_sk
= new_sock
->sk
;
1684 new_tsock
= tipc_sk(new_sk
);
1685 new_tport
= new_tsock
->p
;
1686 new_ref
= new_tport
->ref
;
1689 /* we lock on new_sk; but lockdep sees the lock on sk */
1690 lock_sock_nested(new_sk
, SINGLE_DEPTH_NESTING
);
1693 * Reject any stray messages received by new socket
1694 * before the socket lock was taken (very, very unlikely)
1696 reject_rx_queue(new_sk
);
1698 /* Connect new socket to it's peer */
1699 new_tsock
->peer_name
.ref
= msg_origport(msg
);
1700 new_tsock
->peer_name
.node
= msg_orignode(msg
);
1701 tipc_connect(new_ref
, &new_tsock
->peer_name
);
1702 new_sock
->state
= SS_CONNECTED
;
1704 tipc_set_portimportance(new_ref
, msg_importance(msg
));
1705 if (msg_named(msg
)) {
1706 new_tport
->conn_type
= msg_nametype(msg
);
1707 new_tport
->conn_instance
= msg_nameinst(msg
);
1711 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
1712 * Respond to 'SYN+' by queuing it on new socket.
1714 if (!msg_data_sz(msg
)) {
1715 struct msghdr m
= {NULL
,};
1717 advance_rx_queue(sk
);
1718 send_packet(NULL
, new_sock
, &m
, 0);
1720 __skb_dequeue(&sk
->sk_receive_queue
);
1721 __skb_queue_head(&new_sk
->sk_receive_queue
, buf
);
1722 skb_set_owner_r(buf
, new_sk
);
1724 release_sock(new_sk
);
1732 * shutdown - shutdown socket connection
1733 * @sock: socket structure
1734 * @how: direction to close (must be SHUT_RDWR)
1736 * Terminates connection (if necessary), then purges socket's receive queue.
1738 * Returns 0 on success, errno otherwise
1740 static int shutdown(struct socket
*sock
, int how
)
1742 struct sock
*sk
= sock
->sk
;
1743 struct tipc_port
*tport
= tipc_sk_port(sk
);
1744 struct sk_buff
*buf
;
1747 if (how
!= SHUT_RDWR
)
1752 switch (sock
->state
) {
1757 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1758 buf
= __skb_dequeue(&sk
->sk_receive_queue
);
1760 if (TIPC_SKB_CB(buf
)->handle
!= NULL
) {
1764 tipc_disconnect(tport
->ref
);
1765 tipc_reject_msg(buf
, TIPC_CONN_SHUTDOWN
);
1767 tipc_shutdown(tport
->ref
);
1770 sock
->state
= SS_DISCONNECTING
;
1774 case SS_DISCONNECTING
:
1776 /* Discard any unreceived messages */
1777 __skb_queue_purge(&sk
->sk_receive_queue
);
1779 /* Wake up anyone sleeping in poll */
1780 sk
->sk_state_change(sk
);
1793 * setsockopt - set socket option
1794 * @sock: socket structure
1795 * @lvl: option level
1796 * @opt: option identifier
1797 * @ov: pointer to new option value
1798 * @ol: length of option value
1800 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
1801 * (to ease compatibility).
1803 * Returns 0 on success, errno otherwise
1805 static int setsockopt(struct socket
*sock
, int lvl
, int opt
, char __user
*ov
,
1808 struct sock
*sk
= sock
->sk
;
1809 struct tipc_port
*tport
= tipc_sk_port(sk
);
1813 if ((lvl
== IPPROTO_TCP
) && (sock
->type
== SOCK_STREAM
))
1815 if (lvl
!= SOL_TIPC
)
1816 return -ENOPROTOOPT
;
1817 if (ol
< sizeof(value
))
1819 res
= get_user(value
, (u32 __user
*)ov
);
1826 case TIPC_IMPORTANCE
:
1827 res
= tipc_set_portimportance(tport
->ref
, value
);
1829 case TIPC_SRC_DROPPABLE
:
1830 if (sock
->type
!= SOCK_STREAM
)
1831 res
= tipc_set_portunreliable(tport
->ref
, value
);
1835 case TIPC_DEST_DROPPABLE
:
1836 res
= tipc_set_portunreturnable(tport
->ref
, value
);
1838 case TIPC_CONN_TIMEOUT
:
1839 tipc_sk(sk
)->conn_timeout
= value
;
1840 /* no need to set "res", since already 0 at this point */
1852 * getsockopt - get socket option
1853 * @sock: socket structure
1854 * @lvl: option level
1855 * @opt: option identifier
1856 * @ov: receptacle for option value
1857 * @ol: receptacle for length of option value
1859 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
1860 * (to ease compatibility).
1862 * Returns 0 on success, errno otherwise
1864 static int getsockopt(struct socket
*sock
, int lvl
, int opt
, char __user
*ov
,
1867 struct sock
*sk
= sock
->sk
;
1868 struct tipc_port
*tport
= tipc_sk_port(sk
);
1873 if ((lvl
== IPPROTO_TCP
) && (sock
->type
== SOCK_STREAM
))
1874 return put_user(0, ol
);
1875 if (lvl
!= SOL_TIPC
)
1876 return -ENOPROTOOPT
;
1877 res
= get_user(len
, ol
);
1884 case TIPC_IMPORTANCE
:
1885 res
= tipc_portimportance(tport
->ref
, &value
);
1887 case TIPC_SRC_DROPPABLE
:
1888 res
= tipc_portunreliable(tport
->ref
, &value
);
1890 case TIPC_DEST_DROPPABLE
:
1891 res
= tipc_portunreturnable(tport
->ref
, &value
);
1893 case TIPC_CONN_TIMEOUT
:
1894 value
= tipc_sk(sk
)->conn_timeout
;
1895 /* no need to set "res", since already 0 at this point */
1897 case TIPC_NODE_RECVQ_DEPTH
:
1898 value
= 0; /* was tipc_queue_size, now obsolete */
1900 case TIPC_SOCK_RECVQ_DEPTH
:
1901 value
= skb_queue_len(&sk
->sk_receive_queue
);
1910 return res
; /* "get" failed */
1912 if (len
< sizeof(value
))
1915 if (copy_to_user(ov
, &value
, sizeof(value
)))
1918 return put_user(sizeof(value
), ol
);
1921 /* Protocol switches for the various types of TIPC sockets */
1923 static const struct proto_ops msg_ops
= {
1924 .owner
= THIS_MODULE
,
1929 .socketpair
= sock_no_socketpair
,
1930 .accept
= sock_no_accept
,
1931 .getname
= get_name
,
1933 .ioctl
= sock_no_ioctl
,
1934 .listen
= sock_no_listen
,
1935 .shutdown
= shutdown
,
1936 .setsockopt
= setsockopt
,
1937 .getsockopt
= getsockopt
,
1938 .sendmsg
= send_msg
,
1939 .recvmsg
= recv_msg
,
1940 .mmap
= sock_no_mmap
,
1941 .sendpage
= sock_no_sendpage
1944 static const struct proto_ops packet_ops
= {
1945 .owner
= THIS_MODULE
,
1950 .socketpair
= sock_no_socketpair
,
1952 .getname
= get_name
,
1954 .ioctl
= sock_no_ioctl
,
1956 .shutdown
= shutdown
,
1957 .setsockopt
= setsockopt
,
1958 .getsockopt
= getsockopt
,
1959 .sendmsg
= send_packet
,
1960 .recvmsg
= recv_msg
,
1961 .mmap
= sock_no_mmap
,
1962 .sendpage
= sock_no_sendpage
1965 static const struct proto_ops stream_ops
= {
1966 .owner
= THIS_MODULE
,
1971 .socketpair
= sock_no_socketpair
,
1973 .getname
= get_name
,
1975 .ioctl
= sock_no_ioctl
,
1977 .shutdown
= shutdown
,
1978 .setsockopt
= setsockopt
,
1979 .getsockopt
= getsockopt
,
1980 .sendmsg
= send_stream
,
1981 .recvmsg
= recv_stream
,
1982 .mmap
= sock_no_mmap
,
1983 .sendpage
= sock_no_sendpage
1986 static const struct net_proto_family tipc_family_ops
= {
1987 .owner
= THIS_MODULE
,
1989 .create
= tipc_sk_create
1992 static struct proto tipc_proto
= {
1994 .owner
= THIS_MODULE
,
1995 .obj_size
= sizeof(struct tipc_sock
),
1996 .sysctl_rmem
= sysctl_tipc_rmem
1999 static struct proto tipc_proto_kern
= {
2001 .obj_size
= sizeof(struct tipc_sock
),
2002 .sysctl_rmem
= sysctl_tipc_rmem
2006 * tipc_socket_init - initialize TIPC socket interface
2008 * Returns 0 on success, errno otherwise
2010 int tipc_socket_init(void)
2014 res
= proto_register(&tipc_proto
, 1);
2016 pr_err("Failed to register TIPC protocol type\n");
2020 res
= sock_register(&tipc_family_ops
);
2022 pr_err("Failed to register TIPC socket type\n");
2023 proto_unregister(&tipc_proto
);
2027 sockets_enabled
= 1;
2033 * tipc_socket_stop - stop TIPC socket interface
2035 void tipc_socket_stop(void)
2037 if (!sockets_enabled
)
2040 sockets_enabled
= 0;
2041 sock_unregister(tipc_family_ops
.family
);
2042 proto_unregister(&tipc_proto
);