/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko EiBfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting
 *		Kirk Petersen	:	Made this a module
 *		Christoph Rohland :	Elegant non-blocking accept/connect
 *					algorithm. Lots of bug fixes.
 *		Alexey Kuznetsov :	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *		Andrea Arcangeli :	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid huge amounts
 *					of socks hashed (this for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations
 *		Alexey Kuznetsov :	Full scale SMP. Lots of bugs are introduced 8)
 *		Malcolm Beattie	:	Set peercred for socketpair
 *		Michal Ostrowski :	Module initialization cleanup.
 *		Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  started by 0, so that this name space does not intersect
 *		  with BSD names.
 */
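/*
 * A minimal userspace sketch (illustrative only, not kernel code) of an
 * abstract bind as described above; the leading NUL byte selects the
 * abstract namespace and the address length passed to bind() delimits
 * the name, since abstract names carry no terminating zero:
 *
 *	struct sockaddr_un a;
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memset(&a, 0, sizeof(a));
 *	a.sun_family = AF_UNIX;
 *	a.sun_path[0] = '\0';			// abstract namespace marker
 *	memcpy(a.sun_path + 1, "example", 7);	// raw bytes, no trailing NUL
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */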
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
static atomic_t unix_nr_socks = ATOMIC_INIT(0);

#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */
/*
 *  SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate rwlock.
 */

static inline unsigned unix_hash_fold(__wsum n)
{
	unsigned hash = (__force unsigned)n;

	hash ^= hash>>16;
	hash ^= hash>>8;
	return hash&(UNIX_HASH_SIZE-1);
}
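/*
 * Worked example of the fold above (arithmetic shown for illustration):
 * for n = 0x12345678,
 *	hash ^= hash>>16 : 0x12345678 ^ 0x00001234 = 0x1234444c
 *	hash ^= hash>>8  : 0x1234444c ^ 0x00123444 = 0x12267008
 * and with UNIX_HASH_SIZE == 256 the final mask leaves 0x08, so bytes
 * from all over the checksum influence the low-order slot index.
 */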
#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
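/*
 * Note that unix_recvq_full() serves double duty: on a listener it
 * checks the accept backlog (queued embryo sockets), while in the
 * datagram send path it acts as the flow-control check against a
 * receiver that may not consider us its peer.
 */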
static struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}
/*
 *	Check unix socket name:
 *		- should not be zero length.
 *		- if it does not start with a zero byte, it should be
 *		  NULL terminated (an FS object)
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist.  However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
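/*
 * Worked example for unix_mkname(): for the filesystem name "/tmp/x"
 * the returned length is re-derived from the string itself,
 *	strlen("/tmp/x") + 1 + sizeof(short) = 6 + 1 + 2 = 9,
 * whatever padding the caller passed in. For an abstract name the
 * caller-supplied len is authoritative and a hash is folded over the
 * raw bytes instead, since embedded zeros make strlen() useless there.
 */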
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}
static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned hash)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}
static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock(&unix_table_lock);
	sk_for_each(s, node,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->dentry;

		if (!net_eq(sock_net(s), net))
			continue;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}
static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}
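/*
 * The shift by two above means a socket counts as writable while its
 * outstanding write-memory charge is at most a quarter of sk_sndbuf;
 * e.g. with an (assumed) sk_sndbuf of 64K, poll() reports POLLOUT only
 * while 16K or less is queued but not yet consumed by the reader.
 */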
static void unix_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (unix_writable(sk)) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible_sync(sk->sk_sleep);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	read_unlock(&sk->sk_callback_lock);
}
/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from the previous peer. First, this allows us to
 * do flow control based only on wmem_alloc; second, an sk connected to a
 * peer may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this
		 * when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}
static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
		atomic_read(&unix_nr_socks));
#endif
}
static int unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	dentry	  = u->dentry;
	u->dentry = NULL;
	mnt	  = u->mnt;
	u->mnt	  = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			read_lock(&skpair->sk_callback_lock);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
			read_unlock(&skpair->sk_callback_lock);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (dentry) {
		dput(dentry);
		mntput(mnt);
	}

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What does the above comment talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */

	return 0;
}
static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	sk->sk_peercred.pid	= task_tgid_vnr(current);
	sk->sk_peercred.uid	= current->euid;
	sk->sk_peercred.gid	= current->egid;
	err = 0;

out_unlock:
	unix_state_unlock(sk);
out:
	return err;
}
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);
static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static struct proto unix_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.sockets_allocated	= &unix_nr_socks,
	.obj_size		= sizeof(struct unix_sock),
};
/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_inc(&unix_nr_socks);
	if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
			  &af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u = unix_sk(sk);
	u->dentry = NULL;
	u->mnt	  = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound, sk);
out:
	if (sk == NULL)
		atomic_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}
static int unix_create(struct net *net, struct socket *sock, int protocol)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		/* fall through */
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock) ? 0 : -ENOMEM;
}
static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sock->sk = NULL;

	return unix_release_sock(sk, 0);
}
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;

	mutex_lock(&u->readlock);

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/* Sanity yield. It is an unusual case, but yet... */
		if (!(ordernum&0xFF))
			yield();
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
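/*
 * Example of an autobind result (illustrative): with ordernum starting
 * at 1, the first socket bound this way gets the abstract name
 * "\0" "00001", rendered by /proc/net/unix as "@00001", with
 *	addr->len = sizeof(short) + 1 + 5 = 8.
 */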
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = path.dentry->d_inode;
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(net, inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(path.mnt, path.dentry);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->dentry;
			if (dentry)
				touch_atime(unix_sk(u)->mnt, dentry);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct dentry *dentry = NULL;
	struct nameidata nd;
	int err;
	unsigned hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	mutex_lock(&u->readlock);

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sunaddr->sun_path[0]) {
		unsigned int mode;
		err = 0;
		/*
		 * Get the parent directory, calculate the hash for last
		 * component.
		 */
		err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
		if (err)
			goto out_mknod_parent;

		dentry = lookup_create(&nd, 0);
		err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_mknod_unlock;

		/*
		 * All right, let's create it.
		 */
		mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
		err = mnt_want_write(nd.path.mnt);
		if (err)
			goto out_mknod_dput;
		err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
		mnt_drop_write(nd.path.mnt);
		if (err)
			goto out_mknod_dput;
		mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
		dput(nd.path.dentry);
		nd.path.dentry = dentry;

		addr->hash = UNIX_HASH_SIZE;
	}

	spin_lock(&unix_table_lock);

	if (!sunaddr->sun_path[0]) {
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	} else {
		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
		u->dentry = nd.path.dentry;
		u->mnt    = nd.path.mnt;
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;

out_mknod_dput:
	dput(dentry);
out_mknod_unlock:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	path_put(&nd.path);
out_mknod_parent:
	if (err == -EEXIST)
		err = -EADDRINUSE;
	unix_release_addr(addr);
	goto out_up;
}
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}
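/*
 * The double lock above orders the two state locks by ascending pointer
 * value, which gives a global lock order: two tasks cross-connecting
 * the same pair of sockets both take, say, sk_a before sk_b, so the
 * classic ABBA deadlock cannot occur.
 */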
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}
static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags)
		&& !u->addr && (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we do it after the state is locked,
	   we will have to recheck everything again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   This is a tricky place. We need to grab our state lock and cannot
	   drop the lock on the peer. It is dangerous because a deadlock is
	   possible. Connect-to-self and simultaneous connect attempts are
	   eliminated by checking socket state. other is TCP_LISTEN; if sk
	   is TCP_LISTEN, we check this before attempting to grab the lock.

	   Well, and we have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sock, other->sk_socket, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	newsk->sk_peercred.pid	= task_tgid_vnr(current);
	newsk->sk_peercred.uid	= current->euid;
	newsk->sk_peercred.gid	= current->egid;
	newu = unix_sk(newsk);
	newsk->sk_sleep		= &newu->peer_wait;
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->dentry) {
		newu->dentry	= dget(otheru->dentry);
		newu->mnt	= mntget(otheru->mnt);
	}

	/* Set credentials */
	sk->sk_peercred = other->sk_peercred;

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* queue the embryo skb and send info to the listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other, 0);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	if (skb)
		kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current);
	ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
	ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}
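/*
 * Userspace view of the above (illustrative sketch): socketpair() ends
 * up here and yields two anonymous, mutually connected sockets.
 *
 *	int sv[2];
 *
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0)
 *		write(sv[0], "hi", 2);	// readable on sv[1]
 */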
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}
static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	skb->destructor = sock_wfree;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_fds(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_inflight(scm->fp->fp[i]);
	skb->destructor = unix_destruct_fds;
	return 0;
}
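/*
 * What feeds the attach/detach pair above, sketched from the userspace
 * side (illustrative; fd and fd_to_pass are hypothetical descriptors):
 * an SCM_RIGHTS control message makes scm->fp non-NULL in the sendmsg
 * paths below, which then call unix_attach_fds().
 *
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	char byte = 0;
 *	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *
 *	c->cmsg_level = SOL_SOCKET;
 *	c->cmsg_type  = SCM_RIGHTS;
 *	c->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(c), &fd_to_pass, sizeof(int));
 *	sendmsg(fd, &msg, 0);
 */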
/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags)
		&& !u->addr && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out;

	memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
	if (siocb->scm->fp) {
		err = unix_attach_fds(siocb->scm, skb);
		if (err)
			goto out_free;
	}
	unix_get_secdata(siocb->scm, skb);

	skb_reset_transport_header(skb);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	unix_state_lock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should
		 *	a datagram error return?
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	skb_queue_tail(&other->sk_receive_queue, skb);
	unix_state_unlock(other);
	other->sk_data_ready(other, len);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	struct sockaddr_un *sunaddr = msg->msg_name;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie tmp_scm;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		/*
		 *	Optimisation for the fact that under 0.01% of X
		 *	messages typically need breaking up.
		 */

		size = len-sent;

		/* Keep two messages in the pipe so it schedules better */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		/*
		 *	Grab a buffer
		 */

		skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
					  &err);

		if (skb == NULL)
			goto out_err;

		/*
		 *	If you pass two values to the sock_alloc_send_skb
		 *	it tries to grab the large buffer with GFP_NOFS
		 *	(which can fail easily), and if it fails grab the
		 *	fallback size buffer which is under a page and will
		 *	succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
		if (siocb->scm->fp) {
			err = unix_attach_fds(siocb->scm, skb);
			if (err) {
				kfree_skb(skb);
				goto out_err;
			}
		}

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		skb_queue_tail(&other->sk_receive_queue, skb);
		unix_state_unlock(other);
		other->sk_data_ready(other, size);
		sent += size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}
static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);
}

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	msg->msg_namelen = 0;
	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}
static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	mutex_lock(&u->readlock);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync(&u->peer_wait);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len)
		size = skb->len;
	else if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	siocb->scm->creds = *UNIXCREDS(skb);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);
	} else {
		/* It is questionable: on PEEK we could:
		   - do not return fds - good, but too simple 8)
		   - return fds, and do not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!
		*/
		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}
/*
 *	Sleep until data has arrived. But check for races..
 */

static long unix_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk->sk_sleep, &wait);
	unix_state_unlock(sk);
	return timeo;
}
static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_tomsg
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	mutex_lock(&u->readlock);

	do {
		int chunk;
		struct sk_buff *skb;

		unix_state_lock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL) {
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			mutex_lock(&u->readlock);
			continue;
unlock:
			unix_state_unlock(sk);
			break;
		}
		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if (memcmp(UNIXCREDS(skb), &siocb->scm->creds,
				   sizeof(siocb->scm->creds)) != 0) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
		} else {
			/* Copy credentials */
			siocb->scm->creds = *UNIXCREDS(skb);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			/* put the skb back if we didn't use it up.. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}

			kfree_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable, see the note in
			 * unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);

	if (mode) {
		unix_state_lock(sk);
		sk->sk_shutdown |= mode;
		other = unix_peer(sk);
		if (other)
			sock_hold(other);
		unix_state_unlock(sk);
		sk->sk_state_change(sk);

		if (other &&
			(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

			int peer_mode = 0;

			if (mode&RCV_SHUTDOWN)
				peer_mode |= SEND_SHUTDOWN;
			if (mode&SEND_SHUTDOWN)
				peer_mode |= RCV_SHUTDOWN;
			unix_state_lock(other);
			other->sk_shutdown |= peer_mode;
			unix_state_unlock(other);
			other->sk_state_change(other);
			read_lock(&other->sk_callback_lock);
			if (peer_mode == SHUTDOWN_MASK)
				sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
			else if (peer_mode & RCV_SHUTDOWN)
				sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
			read_unlock(&other->sk_callback_lock);
		}
		if (other)
			sock_put(other);
	}
	return 0;
}
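/*
 * The (mode+1) mapping above turns the userspace shutdown() constants
 * into the internal flag bits:
 *	SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
 *	SHUT_WR   (1) -> SEND_SHUTDOWN (2)
 *	SHUT_RDWR (2) -> RCV_SHUTDOWN|SEND_SHUTDOWN (3)
 * and each direction is then mirrored onto the peer with the roles
 * swapped.
 */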
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = atomic_read(&sk->sk_wmem_alloc);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
	{
		struct sk_buff *skb;

		if (sk->sk_state == TCP_LISTEN) {
			err = -EINVAL;
			break;
		}

		spin_lock(&sk->sk_receive_queue.lock);
		if (sk->sk_type == SOCK_STREAM ||
		    sk->sk_type == SOCK_SEQPACKET) {
			skb_queue_walk(&sk->sk_receive_queue, skb)
				amount += skb->len;
		} else {
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)
				amount = skb->len;
		}
		spin_unlock(&sk->sk_receive_queue.lock);
		err = put_user(amount, (int __user *)arg);
		break;
	}

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
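/*
 * Illustrative userspace use of the ioctls handled above (fd is a
 * hypothetical connected AF_UNIX descriptor):
 *
 *	int n;
 *
 *	ioctl(fd, SIOCINQ, &n);		// bytes queued for reading
 *	ioctl(fd, SIOCOUTQ, &n);	// write memory still charged to fd
 */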
static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	poll_wait(file, sk->sk_sleep, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	poll_wait(file, sk->sk_sleep, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	writable = unix_writable(sk);
	if (writable) {
		other = unix_peer_get(sk);
		if (other) {
			if (unix_peer(other) != sk) {
				poll_wait(file, &unix_sk(other)->peer_wait,
					  wait);
				if (unix_recvq_full(other))
					writable = 0;
			}

			sock_put(other);
		}
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
#ifdef CONFIG_PROC_FS
static struct sock *first_unix_socket(int *i)
{
	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
	struct sock *next = sk_next(s);
	/* More in this chain? */
	if (next)
		return next;
	/* Look for next non-empty chain. */
	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}
struct unix_iter_state {
	struct seq_net_private p;
	int i;
};

static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
{
	struct unix_iter_state *iter = seq->private;
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
		if (sock_net(s) != seq_file_net(seq))
			continue;
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct unix_iter_state *iter = seq->private;
	struct sock *sk = v;
	++*pos;

	if (v == SEQ_START_TOKEN)
		sk = first_unix_socket(&iter->i);
	else
		sk = next_unix_socket(&iter->i, sk);
	while (sk && (sock_net(sk) != seq_file_net(seq)))
		sk = next_unix_socket(&iter->i, sk);
	return sk;
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}
static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct unix_iter_state));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
static struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};
static int unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	proc_net_remove(net, "unix");
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};
static int __init af_unix_init(void)
{
	int rc = -1;
	struct sk_buff *dummy_skb;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}
/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */

fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);