/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko EiBfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting
 *		Kirk Petersen	:	Made this a module
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect
 *					algorithm. Lots of bug fixes.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid the huge amount
 *					of hashed socks (this is for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skb queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations
 *	     Alexey Kuznetsov	:	Full scale SMP. Lot of bugs are introduced 8)
 *	      Malcolm Beattie	:	Set peercred for socketpair
 *	     Michal Ostrowski   :       Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TCP]
 * ECONNREFUSED is not returned from one end of a connected() socket to the
 *	other the moment one end closes.
 * fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *	and a fake inode identifier (nor the BSD first socket fstat twice bug).
 * accept() returns a path name even if the connecting socket has closed
 *	in the meantime (BSD loses the path and gives up).
 * accept() returns 0 length path for an unbound connector. BSD returns 16
 *	and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
 * BSD af_unix apparently has connect forgetting to block properly.
 *	(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  started by 0, so that this name space does not intersect
 *		  with BSD names.
 */
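
/*
 * Example (userspace sketch): binding a socket in the abstract namespace
 * described above.  The leading NUL byte in sun_path selects the abstract
 * namespace; the name is the byte string that follows, and its length is
 * derived from the addrlen passed to bind() rather than from a terminator.
 *
 *	struct sockaddr_un addr;
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.sun_family = AF_UNIX;
 *	memcpy(addr.sun_path + 1, "demo", 4);		(name is "\0demo")
 *	bind(fd, (struct sockaddr *)&addr,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 */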
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>

static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
static atomic_long_t unix_nr_socks;

#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
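
/*
 * UNIX_ABSTRACT() relies on unix_bind() storing hash == UNIX_HASH_SIZE in
 * the address of a filesystem-bound socket (those sockets are hashed by
 * inode number instead), so any other hash value marks an abstract binding.
 */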
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */

/*
 *  SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate spin lock.
 */

static inline unsigned unix_hash_fold(__wsum n)
{
	unsigned hash = (__force unsigned)n;

	hash ^= hash>>16;
	hash ^= hash>>8;
	return hash&(UNIX_HASH_SIZE-1);
}

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

static struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 *	Check unix socket name:
 *		- should be not zero length.
 *		- if started by not zero, should be NULL terminated (FS object)
 *		- if started by zero, it is abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist.  However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
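
/*
 * Worked example: for the filesystem name "/tmp/s", unix_mkname()
 * NUL-terminates the copied address and returns
 * len = strlen("/tmp/s") + 1 + sizeof(short) = 6 + 1 + 2 = 9.
 * For an abstract name such as "\0demo" the caller's len is kept as-is,
 * since such names may legally contain NUL bytes, and only the hash is
 * folded from the raw byte string.
 */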
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned hash)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock(&unix_table_lock);
	sk_for_each(s, node,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->dentry;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}
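
/*
 * A socket counts as writable while queued skbs consume less than a quarter
 * of its send buffer: the shift below multiplies the outstanding write
 * memory by four before comparing it against sk_sndbuf.
 */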
static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync(&wq->wait);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows
 * flow control based purely on wmem_alloc; second, a sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal the error. Messages are lost. Do not do this
		 * when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}

static int unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	dentry	  = u->dentry;
	u->dentry = NULL;
	mnt	  = u->mnt;
	u->mnt	  = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (dentry) {
		dput(dentry);
		mntput(mnt);
	}

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What does the above comment talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */

	return 0;
}

static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}
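
/*
 * Example (userspace sketch): the credentials recorded by the two helpers
 * above are what SO_PEERCRED reports on a connected or socketpair()ed
 * AF_UNIX socket.
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("peer pid=%d uid=%d gid=%d\n",
 *		       peer.pid, peer.uid, peer.gid);
 */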
static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;
	const struct cred *old_cred = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
	if (old_cred)
		put_cred(old_cred);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);

static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct proto unix_proto = {
	.name =		"UNIX",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->dentry = NULL;
	u->mnt	  = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound, sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock) ? 0 : -ENOMEM;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sock->sk = NULL;

	return unix_release_sock(sk, 0);
}

static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	mutex_lock(&u->readlock);

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take long time if many names
		 * are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
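
/*
 * An autobound socket thus ends up with an abstract name of the form
 * "\0" followed by five lowercase hex digits taken from ordernum
 * (e.g. "\0" "00a1b"), which /proc/net/unix displays with a leading '@'.
 */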
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = path.dentry->d_inode;
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(path.mnt, path.dentry);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->dentry;
			if (dentry)
				touch_atime(unix_sk(u)->mnt, dentry);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct dentry *dentry = NULL;
	struct nameidata nd;
	int err;
	unsigned hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	mutex_lock(&u->readlock);

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sunaddr->sun_path[0]) {
		unsigned int mode;
		err = 0;
		/*
		 * Get the parent directory, calculate the hash for last
		 * component.
		 */
		err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
		if (err)
			goto out_mknod_parent;

		dentry = lookup_create(&nd, 0);
		err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_mknod_unlock;

		/*
		 * All right, let's create it.
		 */
		mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = mnt_want_write(nd.path.mnt);
		if (err)
			goto out_mknod_dput;
		err = security_path_mknod(&nd.path, dentry, mode, 0);
		if (err)
			goto out_mknod_drop_write;
		err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
out_mknod_drop_write:
		mnt_drop_write(nd.path.mnt);
		if (err)
			goto out_mknod_dput;
		mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
		dput(nd.path.dentry);
		nd.path.dentry = dentry;

		addr->hash = UNIX_HASH_SIZE;
	}

	spin_lock(&unix_table_lock);

	if (!sunaddr->sun_path[0]) {
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	} else {
		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
		u->dentry = nd.path.dentry;
		u->mnt	  = nd.path.mnt;
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;

out_mknod_dput:
	dput(dentry);
out_mknod_unlock:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	path_put(&nd.path);
out_mknod_parent:
	if (err == -EEXIST)
		err = -EADDRINUSE;
	unix_release_addr(addr);
	goto out;
}

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we will make it after state is locked,
	   we will have to recheck all again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   It is a tricky place. We need to grab write lock and cannot
	   drop lock on peer. It is dangerous because deadlock is
	   possible. Connect to self case and simultaneous
	   attempt to connect are eliminated by checking socket
	   state. other is TCP_LISTEN, if sk is TCP_LISTEN we
	   check this before attempt to grab lock.

	   Well, and we have to recheck the state after socket locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sock, other->sk_socket, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	newsk->sk_wq		= &newu->peer_wq;
	otheru = unix_sk(other);

	/* copy address information from listening to new sock*/
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->dentry) {
		newu->dentry	= dget(otheru->dentry);
		newu->mnt	= mntget(otheru->mnt);
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other, 0);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}
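
/*
 * Example (userspace sketch): the back-to-back wiring above is what makes
 * the classic socketpair() IPC pattern work - each end is the other's peer
 * from the moment the call returns.
 *
 *	int sv[2];
 *
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
 *		write(sv[0], "ping", 4);	(readable on sv[1])
 *		write(sv[1], "pong", 4);	(readable on sv[0])
 *	}
 */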
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}

static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid  = UNIXCB(skb).pid;
	scm.cred = UNIXCB(skb).cred;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_inflight(scm->fp->fp[i]);
	return 0;
}
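
/*
 * Example (userspace sketch): the in-flight accounting above is driven by
 * SCM_RIGHTS control messages like the one below, which passes an open
 * descriptor to the peer (sock_fd and fd_to_pass are placeholders).
 *
 *	char buf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = "x", .iov_len = 1 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = buf,
 *			      .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type  = SCM_RIGHTS;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *	sendmsg(sock_fd, &msg, 0);
 */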
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid  = get_pid(scm->pid);
	UNIXCB(skb).cred = get_cred(scm->cred);
	UNIXCB(skb).fp = NULL;
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}

/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(siocb->scm, skb, true);
	if (err < 0)
		goto out_free;
	unix_get_secdata(siocb->scm, skb);

	skb_reset_transport_header(skb);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	unix_state_lock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should
		 *	a datagram error do here?
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	skb_queue_tail(&other->sk_receive_queue, skb);
	unix_state_unlock(other);
	other->sk_data_ready(other, len);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}


static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	struct sockaddr_un *sunaddr = msg->msg_name;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie tmp_scm;
	bool fds_sent = false;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		/*
		 *	Optimisation for the fact that under 0.01% of X
		 *	messages typically need breaking up.
		 */

		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		/*
		 *	Grab a buffer
		 */

		skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
					  &err);

		if (skb == NULL)
			goto out_err;

		/*
		 *	If you pass two values to the sock_alloc_send_skb
		 *	it tries to grab the large buffer with GFP_NOFS
		 *	(which can fail easily), and if it fails grab the
		 *	fallback size buffer which is under a page and will
		 *	succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		fds_sent = true;

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		skb_queue_tail(&other->sk_receive_queue, skb);
		unix_state_unlock(other);
		other->sk_data_ready(other, size);
		sent += size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}

static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);
}

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	msg->msg_namelen = 0;
	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}

static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	mutex_lock(&u->readlock);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync(&u->peer_wait);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len)
		size = skb->len;
	else if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);
	} else {
		/* It is questionable: on PEEK we could:
		   - do not return fds - good, but too simple 8)
		   - return fds, and do not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!
		*/
		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}

/*
 *	Sleep until data has arrived. But check for races..
 */

static long unix_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}

static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	mutex_lock(&u->readlock);

	do {
		int chunk;
		struct sk_buff *skb;

		unix_state_lock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL) {
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			mutex_lock(&u->readlock);
			continue;
 unlock:
			unix_state_unlock(sk);
			break;
		}
		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
			    (UNIXCB(skb).cred != siocb->scm->cred)) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
		} else {
			/* Copy credentials */
			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			/* put the skb back if we didn't use it up.. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}

			consume_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}

static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
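	/* The +1 above maps SHUT_RD (0), SHUT_WR (1) and SHUT_RDWR (2) from
	 * userspace onto the RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2) bits:
	 * 0+1 = 01b, 1+1 = 10b, 2+1 = 11b.
	 */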
	if (mode) {
		unix_state_lock(sk);
		sk->sk_shutdown |= mode;
		other = unix_peer(sk);
		if (other)
			sock_hold(other);
		unix_state_unlock(sk);
		sk->sk_state_change(sk);

		if (other &&
			(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

			int peer_mode = 0;

			if (mode&RCV_SHUTDOWN)
				peer_mode |= SEND_SHUTDOWN;
			if (mode&SEND_SHUTDOWN)
				peer_mode |= RCV_SHUTDOWN;
			unix_state_lock(other);
			other->sk_shutdown |= peer_mode;
			unix_state_unlock(other);
			other->sk_state_change(other);
			if (peer_mode == SHUTDOWN_MASK)
				sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
			else if (peer_mode & RCV_SHUTDOWN)
				sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
		}
		if (other)
			sock_put(other);
	}
	return 0;
}

static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = sk_wmem_alloc_get(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		{
			struct sk_buff *skb;

			if (sk->sk_state == TCP_LISTEN) {
				err = -EINVAL;
				break;
			}

			spin_lock(&sk->sk_receive_queue.lock);
			if (sk->sk_type == SOCK_STREAM ||
			    sk->sk_type == SOCK_SEQPACKET) {
				skb_queue_walk(&sk->sk_receive_queue, skb)
					amount += skb->len;
			} else {
				skb = skb_peek(&sk->sk_receive_queue);
				if (skb)
					amount = skb->len;
			}
			spin_unlock(&sk->sk_receive_queue.lock);
			err = put_user(amount, (int __user *)arg);
			break;
		}

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}

static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	writable = unix_writable(sk);
	other = unix_peer_get(sk);
	if (other) {
		if (unix_peer(other) != sk) {
			sock_poll_wait(file, &unix_sk(other)->peer_wait,
					  wait);
			if (unix_recvq_full(other))
				writable = 0;
		}

		sock_put(other);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

#ifdef CONFIG_PROC_FS
static struct sock *first_unix_socket(int *i)
{
	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
	struct sock *next = sk_next(s);
	/* More in this chain? */
	if (next)
		return next;
	/* Look for next non-empty chain. */
	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

struct unix_iter_state {
	struct seq_net_private p;
	int i;
};

static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
{
	struct unix_iter_state *iter = seq->private;
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
		if (sock_net(s) != seq_file_net(seq))
			continue;
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct unix_iter_state *iter = seq->private;
	struct sock *sk = v;
	++*pos;

	if (v == SEQ_START_TOKEN)
		sk = first_unix_socket(&iter->i);
	else
		sk = next_unix_socket(&iter->i, sk);
	while (sk && (sock_net(sk) != seq_file_net(seq)))
		sk = next_unix_socket(&iter->i, sk);
	return sk;
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}

static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct unix_iter_state));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};


static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	proc_net_remove(net, "unix");
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

static int __init af_unix_init(void)
{
	int rc = -1;
	struct sk_buff *dummy_skb;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */

fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);