af_unix: limit recursion level
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko Eißfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting
 *		Kirk Petersen	:	Made this a module
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect
 *					algorithm. Lots of bug fixes.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid huge amounts
 *					of socks hashed (this for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skb queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations
 *	     Alexey Kuznetsov	:	Full scale SMP. Lot of bugs are introduced 8)
 *	      Malcolm Beattie	:	Set peercred for socketpair
 *	     Michal Ostrowski	:	Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with 0, so that this name space does not intersect
 *		  with BSD names.
 */
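
/*
 * Editor's illustrative userspace sketch (not part of the kernel build):
 * how the two binding flavours described above look from user code.  The
 * pathname form is NUL-terminated; the abstract form starts with a 0 byte
 * and its length comes from addrlen, not from termination.
 *
 *	struct sockaddr_un a;
 *
 *	// Filesystem binding: "/tmp/sock", NUL terminated.
 *	a.sun_family = AF_UNIX;
 *	strcpy(a.sun_path, "/tmp/sock");
 *	bind(fd, (struct sockaddr *)&a,
 *	     sizeof(a.sun_family) + strlen(a.sun_path) + 1);
 *
 *	// Abstract binding: leading 0 byte, name "demo", no NUL needed.
 *	a.sun_path[0] = 0;
 *	memcpy(a.sun_path + 1, "demo", 4);
 *	bind(fd, (struct sockaddr *)&a, sizeof(a.sun_family) + 1 + 4);
 */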
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>

static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
static atomic_long_t unix_nr_socks;

#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */

/*
 * SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate spin lock.
 */

static inline unsigned unix_hash_fold(__wsum n)
{
	unsigned hash = (__force unsigned)n;
	hash ^= hash >> 16;
	hash ^= hash >> 8;
	return hash & (UNIX_HASH_SIZE - 1);
}
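
/*
 * Editor's worked example (illustrative, assuming UNIX_HASH_SIZE == 256):
 * the fold mixes all 32 checksum bits into the low byte.  For n = 0x12345678:
 *
 *	hash ^= hash >> 16;	-> 0x1234444c
 *	hash ^= hash >> 8;	-> 0x12267008
 *	return hash & 0xff;	-> 0x08
 */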
#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

static struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 *	Check unix socket name:
 *		- it must not be zero length.
 *		- if it does not start with a zero byte, it must be
 *		  NUL-terminated (an FS object).
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist. However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path) + 1 + sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
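
/*
 * Editor's worked example (illustrative): for a bind to the pathname
 * "/tmp/x", the branch above NUL-terminates the copy and returns
 * strlen("/tmp/x") + 1 + sizeof(short) = 6 + 1 + 2 = 9; no hash is
 * computed, since pathname sockets are hashed by inode later.  An
 * abstract name such as "\0demo" skips that branch and gets *hashp from
 * a checksum over the whole address instead.
 */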
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned hash)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock(&unix_table_lock);
	sk_for_each(s, node,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->dentry;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}

static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}
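
/*
 * Editor's note (illustrative): the shift above makes a socket "writable"
 * only while its outstanding write memory is at most a quarter of
 * sk_sndbuf; e.g. with a 64 KiB sndbuf, wmem_alloc must drop to 16 KiB
 * or below before POLLOUT is reported again.
 */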
static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync(&wq->wait);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows
 * flow control based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is
		 * disconnected, we signal error. Messages are lost.
		 * Do not do this when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}

static int unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	dentry	  = u->dentry;
	u->dentry = NULL;
	mnt	  = u->mnt;
	u->mnt	  = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (dentry) {
		dput(dentry);
		mntput(mnt);
	}

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What does the above comment talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */

	return 0;
}

static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;
	const struct cred *old_cred = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
	if (old_cred)
		put_cred(old_cred);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);

static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct proto unix_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
			  &af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->dentry = NULL;
	u->mnt	  = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound, sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;	/* fall through */
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock) ? 0 : -ENOMEM;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sock->sk = NULL;

	return unix_release_sock(sk, 0);
}

static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	mutex_lock(&u->readlock);

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
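
/*
 * Editor's note (illustrative): autobind assigns an abstract address of
 * the form "\0xxxxx", where xxxxx is the counter printed as five hex
 * digits, e.g. the name "\0a3f2c" for ordernum 0xa3f2c; addr->len is then
 * sizeof(short) + 1 + 5 = 8.  Userspace typically triggers this by using
 * connect() or sendmsg() with SO_PASSCRED set on a never-bound socket.
 */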
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = path.dentry->d_inode;
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(path.mnt, path.dentry);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->dentry;
			if (dentry)
				touch_atime(unix_sk(u)->mnt, dentry);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}


static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct dentry *dentry = NULL;
	struct nameidata nd;
	int err;
	unsigned hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	mutex_lock(&u->readlock);

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sunaddr->sun_path[0]) {
		unsigned int mode;
		err = 0;
		/*
		 * Get the parent directory, calculate the hash for last
		 * component.
		 */
		err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
		if (err)
			goto out_mknod_parent;

		dentry = lookup_create(&nd, 0);
		err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_mknod_unlock;

		/*
		 * All right, let's create it.
		 */
		mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = mnt_want_write(nd.path.mnt);
		if (err)
			goto out_mknod_dput;
		err = security_path_mknod(&nd.path, dentry, mode, 0);
		if (err)
			goto out_mknod_drop_write;
		err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
out_mknod_drop_write:
		mnt_drop_write(nd.path.mnt);
		if (err)
			goto out_mknod_dput;
		mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
		dput(nd.path.dentry);
		nd.path.dentry = dentry;

		addr->hash = UNIX_HASH_SIZE;
	}

	spin_lock(&unix_table_lock);

	if (!sunaddr->sun_path[0]) {
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	} else {
		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
		u->dentry = nd.path.dentry;
		u->mnt    = nd.path.mnt;
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;

out_mknod_dput:
	dput(dentry);
out_mknod_unlock:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	path_put(&nd.path);
out_mknod_parent:
	if (err == -EEXIST)
		err = -EADDRINUSE;
	unix_release_addr(addr);
	goto out_up;
}

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}
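
/*
 * Editor's note (illustrative): taking the two state locks in a fixed
 * order (lower sock address first) is what makes the double lock
 * deadlock-free.  If CPU A runs unix_state_double_lock(x, y) while CPU B
 * runs unix_state_double_lock(y, x), both lock min(x, y) first, so one of
 * them simply waits instead of each holding the lock the other needs.
 */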
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g: breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we allocate them only after state is locked,
	   we would have to recheck everything again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   This is a tricky place. We need to grab our state lock and cannot
	   drop the lock on the peer. It is dangerous because deadlock is
	   possible. The connect-to-self case and simultaneous
	   attempts to connect are eliminated by checking socket
	   state. other is TCP_LISTEN; if sk were TCP_LISTEN we
	   would have checked this before attempting to grab the lock.

	   Well, and we have to recheck the state after socket locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sock, other->sk_socket, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	newsk->sk_wq = &newu->peer_wq;
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->dentry) {
		newu->dentry	= dget(otheru->dentry);
		newu->mnt	= mntget(otheru->mnt);
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take it and send info to the listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other, 0);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}
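
/*
 * Editor's illustrative userspace sketch (not part of the kernel build):
 * the "back to back" join above is what makes both ends of socketpair(2)
 * immediately connected peers carrying each other's credentials:
 *
 *	int fds[2];
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0) {
 *		write(fds[0], "ping", 4);	// readable on fds[1]
 *		write(fds[1], "pong", 4);	// readable on fds[0]
 *	}
 */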
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}


static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid  = UNIXCB(skb).pid;
	scm.cred = UNIXCB(skb).cred;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

#define MAX_RECURSION_LEVEL 4

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	unsigned char max_level = 0;
	int unix_sock_count = 0;

	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

		if (sk) {
			unix_sock_count++;
			max_level = max(max_level,
					unix_sk(sk)->recursion_level);
		}
	}
	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	if (unix_sock_count) {
		for (i = scm->fp->count - 1; i >= 0; i--)
			unix_inflight(scm->fp->fp[i]);
	}
	return max_level;
}
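
/*
 * Editor's note on the scheme above (illustrative): recursion_level tracks
 * how deeply unix sockets are nested in a socket's queued messages.  Any
 * queued skb raises the receiver to at least level 1; an skb carrying an
 * fd for a unix socket that itself sits at level n raises the receiver to
 * n + 1 (see max_level = err + 1 in the sendmsg paths below).
 * unix_attach_fds() refuses with -ETOOMANYREFS once the attached sockets
 * exceed MAX_RECURSION_LEVEL, so userspace cannot queue an unboundedly
 * deep chain of in-flight sockets for the garbage collector to chase.
 */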
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;
	UNIXCB(skb).pid  = get_pid(scm->pid);
	UNIXCB(skb).cred = get_cred(scm->cred);
	UNIXCB(skb).fp = NULL;
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}

/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;
	int max_level;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(siocb->scm, skb, true);
	if (err < 0)
		goto out_free;
	max_level = err + 1;
	unix_get_secdata(siocb->scm, skb);

	skb_reset_transport_header(skb);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	unix_state_lock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should
		 *	datagram error
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	skb_queue_tail(&other->sk_receive_queue, skb);
	if (max_level > unix_sk(other)->recursion_level)
		unix_sk(other)->recursion_level = max_level;
	unix_state_unlock(other);
	other->sk_data_ready(other, len);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}


static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	struct sockaddr_un *sunaddr = msg->msg_name;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie tmp_scm;
	bool fds_sent = false;
	int max_level;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		/*
		 *	Optimisation for the fact that under 0.01% of X
		 *	messages typically need breaking up.
		 */

		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		/*
		 *	Grab a buffer
		 */

		skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
					  &err);

		if (skb == NULL)
			goto out_err;

		/*
		 *	If you pass two values to sock_alloc_send_skb
		 *	it tries to grab the large buffer with GFP_NOFS
		 *	(which can fail easily), and if that fails it grabs
		 *	the fallback size buffer, which is under a page and
		 *	will succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));


		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		max_level = err + 1;
		fds_sent = true;

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other, size);
		sent += size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}

static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);
}

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	msg->msg_namelen = 0;
	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}

static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	mutex_lock(&u->readlock);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync(&u->peer_wait);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len)
		size = skb->len;
	else if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);
	} else {
		/* It is questionable: on PEEK we could:
		   - do not return fds - good, but too simple 8)
		   - return fds, and do not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!
		*/
		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}

/*
 *	Sleep until data has arrived. But check for races..
 */

static long unix_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}



static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_toiovec
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	mutex_lock(&u->readlock);

	do {
		int chunk;
		struct sk_buff *skb;

		unix_state_lock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL) {
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			mutex_lock(&u->readlock);
			continue;
 unlock:
			unix_state_unlock(sk);
			break;
		}
		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
			    (UNIXCB(skb).cred != siocb->scm->cred)) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
		} else {
			/* Copy credentials */
			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			/* put the skb back if we didn't use it up.. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}

			consume_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}

static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);

	if (mode) {
		unix_state_lock(sk);
		sk->sk_shutdown |= mode;
		other = unix_peer(sk);
		if (other)
			sock_hold(other);
		unix_state_unlock(sk);
		sk->sk_state_change(sk);

		if (other &&
			(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

			int peer_mode = 0;

			if (mode&RCV_SHUTDOWN)
				peer_mode |= SEND_SHUTDOWN;
			if (mode&SEND_SHUTDOWN)
				peer_mode |= RCV_SHUTDOWN;
			unix_state_lock(other);
			other->sk_shutdown |= peer_mode;
			unix_state_unlock(other);
			other->sk_state_change(other);
			if (peer_mode == SHUTDOWN_MASK)
				sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
			else if (peer_mode & RCV_SHUTDOWN)
				sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
		}
		if (other)
			sock_put(other);
	}
	return 0;
}
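
/*
 * Editor's worked example (illustrative): the "(mode+1)" mapping above
 * converts the userspace shutdown(2) constants into shutdown mask bits:
 *
 *	SHUT_RD   (0) -> 1 = RCV_SHUTDOWN
 *	SHUT_WR   (1) -> 2 = SEND_SHUTDOWN
 *	SHUT_RDWR (2) -> 3 = RCV_SHUTDOWN | SEND_SHUTDOWN
 */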
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = sk_wmem_alloc_get(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		{
			struct sk_buff *skb;

			if (sk->sk_state == TCP_LISTEN) {
				err = -EINVAL;
				break;
			}

			spin_lock(&sk->sk_receive_queue.lock);
			if (sk->sk_type == SOCK_STREAM ||
			    sk->sk_type == SOCK_SEQPACKET) {
				skb_queue_walk(&sk->sk_receive_queue, skb)
					amount += skb->len;
			} else {
				skb = skb_peek(&sk->sk_receive_queue);
				if (skb)
					amount = skb->len;
			}
			spin_unlock(&sk->sk_receive_queue.lock);
			err = put_user(amount, (int __user *)arg);
			break;
		}

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
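
/*
 * Editor's illustrative userspace sketch (not part of the kernel build):
 *
 *	int queued;
 *	if (ioctl(fd, SIOCINQ, &queued) == 0)	// bytes waiting to be read
 *		printf("%d bytes readable\n", queued);
 *	if (ioctl(fd, SIOCOUTQ, &queued) == 0)	// write memory still in flight
 *		printf("%d bytes not yet consumed by the peer\n", queued);
 */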
static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	writable = unix_writable(sk);
	if (writable) {
		other = unix_peer_get(sk);
		if (other) {
			if (unix_peer(other) != sk) {
				sock_poll_wait(file, &unix_sk(other)->peer_wait,
					       wait);
				if (unix_recvq_full(other))
					writable = 0;
			}

			sock_put(other);
		}
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

#ifdef CONFIG_PROC_FS
static struct sock *first_unix_socket(int *i)
{
	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
	struct sock *next = sk_next(s);
	/* More in this chain? */
	if (next)
		return next;
	/* Look for next non-empty chain. */
	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

struct unix_iter_state {
	struct seq_net_private p;
	int i;
};

static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
{
	struct unix_iter_state *iter = seq->private;
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
		if (sock_net(s) != seq_file_net(seq))
			continue;
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct unix_iter_state *iter = seq->private;
	struct sock *sk = v;
	++*pos;

	if (v == SEQ_START_TOKEN)
		sk = first_unix_socket(&iter->i);
	else
		sk = next_unix_socket(&iter->i, sk);
	while (sk && (sock_net(sk) != seq_file_net(seq)))
		sk = next_unix_socket(&iter->i, sk);
	return sk;
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}

static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct unix_iter_state));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};


static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	proc_net_remove(net, "unix");
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

static int __init af_unix_init(void)
{
	int rc = -1;
	struct sk_buff *dummy_skb;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);