/* net/unix/af_unix.c */

/*
 * NET4: Implementation of BSD Unix domain sockets.
 *
 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:
 * Linus Torvalds : Assorted bug cures.
 * Niibe Yutaka : async I/O support.
 * Carsten Paeth : PF_UNIX check, address fixes.
 * Alan Cox : Limit size of allocated blocks.
 * Alan Cox : Fixed the stupid socketpair bug.
 * Alan Cox : BSD compatibility fine tuning.
 * Alan Cox : Fixed a bug in connect when interrupted.
 * Alan Cox : Sorted out a proper draft version of
 *            file descriptor passing hacked up from
 *            Mike Shaver's work.
 * Marty Leisner : Fixes to fd passing
 * Nick Nevin : recvmsg bugfix.
 * Alan Cox : Started proper garbage collector
 * Heiko Eißfeldt : Missing verify_area check
 * Alan Cox : Started POSIXisms
 * Andreas Schwab : Replace inode by dentry for proper
 *                  reference counting
 * Kirk Petersen : Made this a module
 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
 *                     Lots of bug fixes.
 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
 *                    by the above two patches.
 * Andrea Arcangeli : If possible we block in connect(2)
 *                    if the max backlog of the listen socket
 *                    has been reached. This won't break
 *                    old apps and it will avoid a huge amount
 *                    of hashed socks (this is for unix_gc()
 *                    performance reasons).
 *                    Security fix that limits the max
 *                    number of socks to 2*max_files and
 *                    the number of skbs queueable in the
 *                    dgram receiver.
 * Artur Skawina : Hash function optimizations
 * Alexey Kuznetsov : Full scale SMP. Lots of bugs are introduced 8)
 * Malcolm Beattie : Set peercred for socketpair
 * Michal Ostrowski : Module initialization cleanup.
 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
 *                   the core infrastructure is doing that
 *                   for all net proto families now (2.5.69+)
 *
 * Known differences from reference BSD that was tested:
 *
 * [TO FIX]
 * ECONNREFUSED is not returned from one end of a connected() socket to the
 *	other the moment one end closes.
 * fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *	and a fake inode identifier (nor the BSD first-socket-fstat-twice bug).
 * [NOT TO FIX]
 * accept() returns a path name even if the connecting socket has closed
 *	in the meantime (BSD loses the path and gives up).
 * accept() returns 0 length path for an unbound connector. BSD returns 16
 *	and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
 * BSD af_unix apparently has connect forgetting to block properly.
 *	(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 * Bug fixes and improvements.
 *	- client shutdown killed server socket.
 *	- removed all useless cli/sti pairs.
 *
 * Semantic changes/extensions.
 *	- generic control message passing.
 *	- SCM_CREDENTIALS control message.
 *	- "Abstract" (not FS-based) socket bindings.
 *	  Abstract names are sequences of bytes (not zero terminated)
 *	  started by 0, so that this name space does not intersect
 *	  with BSD names.
 */

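/*
 * Illustrative userspace sketch (not part of this file): the abstract
 * namespace described above is selected by a leading NUL byte in
 * sun_path. Assuming a name "\0example", a bind might look like:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	memcpy(a.sun_path, "\0example", 8);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 8);
 *
 * The address length, not a terminating NUL, delimits an abstract
 * name; "\0example" and "\0example\0" are two different names.
 */
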
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>

static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
static atomic_t unix_nr_socks = ATOMIC_INIT(0);

#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])

#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)

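/*
 * The table has UNIX_HASH_SIZE hash buckets plus one extra bucket that
 * holds sockets not yet bound to any address. Sockets bound to a
 * filesystem name hash by inode number, abstract sockets by a checksum
 * of the name (see unix_hash_fold() below); the socket type is XORed
 * into name hashes so that identical names of different types land in
 * different chains.
 */
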
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
        memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
        scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */

/*
 * SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate spin lock.
 */

static inline unsigned unix_hash_fold(__wsum n)
{
        unsigned hash = (__force unsigned)n;

        hash ^= hash>>16;
        hash ^= hash>>8;
        return hash&(UNIX_HASH_SIZE-1);
}

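/*
 * Worked example (assuming UNIX_HASH_SIZE is 256, i.e. an 8-bit mask):
 * for a checksum of 0x12345678 the two folds give
 *	0x12345678 ^ 0x00001234 = 0x1234444c
 *	0x1234444c ^ 0x00123444 = 0x12267008
 * and 0x12267008 & 0xff selects bucket 0x08. The low result byte is
 * the XOR of all four checksum bytes, so every byte of the checksum
 * influences the bucket choice.
 */
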
#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
        return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
        return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
        return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

static struct sock *unix_peer_get(struct sock *s)
{
        struct sock *peer;

        unix_state_lock(s);
        peer = unix_peer(s);
        if (peer)
                sock_hold(peer);
        unix_state_unlock(s);
        return peer;
}

static inline void unix_release_addr(struct unix_address *addr)
{
        if (atomic_dec_and_test(&addr->refcnt))
                kfree(addr);
}

/*
 * Check unix socket name:
 *	- should not be zero length.
 *	- if it does not start with a zero byte, it should be
 *	  NUL-terminated (FS object)
 *	- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
{
        if (len <= sizeof(short) || len > sizeof(*sunaddr))
                return -EINVAL;
        if (!sunaddr || sunaddr->sun_family != AF_UNIX)
                return -EINVAL;
        if (sunaddr->sun_path[0]) {
                /*
                 * This may look like an off-by-one error but it is a bit more
                 * subtle. 108 is the longest valid AF_UNIX path for a binding.
                 * sun_path[108] doesn't as such exist. However in kernel space
                 * we are guaranteed that it is a valid memory location in our
                 * kernel address buffer.
                 */
                ((char *)sunaddr)[len] = 0;
                len = strlen(sunaddr->sun_path)+1+sizeof(short);
                return len;
        }

        *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
        return len;
}

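/*
 * Note on the return value: for a filesystem name, unix_mkname() returns
 * sizeof(short) for sun_family plus the path length including its NUL;
 * for an abstract name the caller-supplied length is returned unchanged
 * and *hashp is filled in from a checksum over the whole address, so the
 * leading zero byte and any embedded zero bytes are all significant.
 */
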
static void __unix_remove_socket(struct sock *sk)
{
        sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
        WARN_ON(!sk_unhashed(sk));
        sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
        spin_lock(&unix_table_lock);
        __unix_remove_socket(sk);
        spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
        spin_lock(&unix_table_lock);
        __unix_insert_socket(list, sk);
        spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
                                              struct sockaddr_un *sunname,
                                              int len, int type, unsigned hash)
{
        struct sock *s;
        struct hlist_node *node;

        sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
                struct unix_sock *u = unix_sk(s);

                if (!net_eq(sock_net(s), net))
                        continue;

                if (u->addr->len == len &&
                    !memcmp(u->addr->name, sunname, len))
                        goto found;
        }
        s = NULL;
found:
        return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
                                                   struct sockaddr_un *sunname,
                                                   int len, int type,
                                                   unsigned hash)
{
        struct sock *s;

        spin_lock(&unix_table_lock);
        s = __unix_find_socket_byname(net, sunname, len, type, hash);
        if (s)
                sock_hold(s);
        spin_unlock(&unix_table_lock);
        return s;
}

static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
{
        struct sock *s;
        struct hlist_node *node;

        spin_lock(&unix_table_lock);
        sk_for_each(s, node,
                    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
                struct dentry *dentry = unix_sk(s)->dentry;

                if (!net_eq(sock_net(s), net))
                        continue;

                if (dentry && dentry->d_inode == i) {
                        sock_hold(s);
                        goto found;
                }
        }
        s = NULL;
found:
        spin_unlock(&unix_table_lock);
        return s;
}

static inline int unix_writable(struct sock *sk)
{
        return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

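/*
 * The shift by two makes the socket writable only while its queued
 * write memory is at most a quarter of sk_sndbuf (wmem_alloc * 4 <=
 * sndbuf); the effect is that a writer is woken only once a reasonable
 * fraction of the buffer is actually free.
 */
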
static void unix_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        if (unix_writable(sk)) {
                wq = rcu_dereference(sk->sk_wq);
                if (wq_has_sleeper(wq))
                        wake_up_interruptible_sync(&wq->wait);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
}

/* When dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from the previous peer. First, it allows us to
 * do flow control based only on wmem_alloc; second, sk connected to peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
        if (!skb_queue_empty(&sk->sk_receive_queue)) {
                skb_queue_purge(&sk->sk_receive_queue);
                wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

                /* If one link of bidirectional dgram pipe is disconnected,
                 * we signal error. Messages are lost. Do not do this when
                 * the peer was not connected to us.
                 */
                if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
                        other->sk_err = ECONNRESET;
                        other->sk_error_report(other);
                }
        }
}

static void unix_sock_destructor(struct sock *sk)
{
        struct unix_sock *u = unix_sk(sk);

        skb_queue_purge(&sk->sk_receive_queue);

        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(!sk_unhashed(sk));
        WARN_ON(sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
                printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
                return;
        }

        if (u->addr)
                unix_release_addr(u->addr);

        atomic_dec(&unix_nr_socks);
        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
        printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
               atomic_read(&unix_nr_socks));
#endif
}

static int unix_release_sock(struct sock *sk, int embrion)
{
        struct unix_sock *u = unix_sk(sk);
        struct dentry *dentry;
        struct vfsmount *mnt;
        struct sock *skpair;
        struct sk_buff *skb;
        int state;

        unix_remove_socket(sk);

        /* Clear state */
        unix_state_lock(sk);
        sock_orphan(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;
        dentry = u->dentry;
        u->dentry = NULL;
        mnt = u->mnt;
        u->mnt = NULL;
        state = sk->sk_state;
        sk->sk_state = TCP_CLOSE;
        unix_state_unlock(sk);

        wake_up_interruptible_all(&u->peer_wait);

        skpair = unix_peer(sk);

        if (skpair != NULL) {
                if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
                        unix_state_lock(skpair);
                        /* No more writes */
                        skpair->sk_shutdown = SHUTDOWN_MASK;
                        if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
                                skpair->sk_err = ECONNRESET;
                        unix_state_unlock(skpair);
                        skpair->sk_state_change(skpair);
                        sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
                }
                sock_put(skpair); /* It may now die */
                unix_peer(sk) = NULL;
        }

        /* Try to flush out this socket. Throw out buffers at least */

        while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                if (state == TCP_LISTEN)
                        unix_release_sock(skb->sk, 1);
                /* passed fds are erased in the kfree_skb hook */
                kfree_skb(skb);
        }

        if (dentry) {
                dput(dentry);
                mntput(mnt);
        }

        sock_put(sk);

        /* ---- Socket is dead now and most probably destroyed ---- */

        /*
         * Fixme: BSD difference: In BSD all sockets connected to us get
         *        ECONNRESET and we die on the spot. In Linux we behave
         *        like files and pipes do and wait for the last
         *        dereference.
         *
         * Can't we simply set sock->err?
         *
         * What the above comment does talk about? --ANK(980817)
         */

        if (unix_tot_inflight)
                unix_gc();      /* Garbage collect fds */

        return 0;
}

static int unix_listen(struct socket *sock, int backlog)
{
        int err;
        struct sock *sk = sock->sk;
        struct unix_sock *u = unix_sk(sk);

        err = -EOPNOTSUPP;
        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto out;       /* Only stream/seqpacket sockets accept */
        err = -EINVAL;
        if (!u->addr)
                goto out;       /* No listens on an unbound socket */
        unix_state_lock(sk);
        if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
                goto out_unlock;
        if (backlog > sk->sk_max_ack_backlog)
                wake_up_interruptible_all(&u->peer_wait);
        sk->sk_max_ack_backlog = backlog;
        sk->sk_state = TCP_LISTEN;
        /* set credentials so connect can copy them */
        sk->sk_peercred.pid = task_tgid_vnr(current);
        current_euid_egid(&sk->sk_peercred.uid, &sk->sk_peercred.gid);
        err = 0;

out_unlock:
        unix_state_unlock(sk);
out:
        return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
                               int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
                                    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
                               struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
                               struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
                              struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
                              struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
                              int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
                                  struct msghdr *, size_t);

static const struct proto_ops unix_stream_ops = {
        .family = PF_UNIX,
        .owner = THIS_MODULE,
        .release = unix_release,
        .bind = unix_bind,
        .connect = unix_stream_connect,
        .socketpair = unix_socketpair,
        .accept = unix_accept,
        .getname = unix_getname,
        .poll = unix_poll,
        .ioctl = unix_ioctl,
        .listen = unix_listen,
        .shutdown = unix_shutdown,
        .setsockopt = sock_no_setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg = unix_stream_sendmsg,
        .recvmsg = unix_stream_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};

static const struct proto_ops unix_dgram_ops = {
        .family = PF_UNIX,
        .owner = THIS_MODULE,
        .release = unix_release,
        .bind = unix_bind,
        .connect = unix_dgram_connect,
        .socketpair = unix_socketpair,
        .accept = sock_no_accept,
        .getname = unix_getname,
        .poll = unix_dgram_poll,
        .ioctl = unix_ioctl,
        .listen = sock_no_listen,
        .shutdown = unix_shutdown,
        .setsockopt = sock_no_setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg = unix_dgram_sendmsg,
        .recvmsg = unix_dgram_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};

static const struct proto_ops unix_seqpacket_ops = {
        .family = PF_UNIX,
        .owner = THIS_MODULE,
        .release = unix_release,
        .bind = unix_bind,
        .connect = unix_stream_connect,
        .socketpair = unix_socketpair,
        .accept = unix_accept,
        .getname = unix_getname,
        .poll = unix_dgram_poll,
        .ioctl = unix_ioctl,
        .listen = unix_listen,
        .shutdown = unix_shutdown,
        .setsockopt = sock_no_setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg = unix_seqpacket_sendmsg,
        .recvmsg = unix_dgram_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};

static struct proto unix_proto = {
        .name = "UNIX",
        .owner = THIS_MODULE,
        .obj_size = sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock)
{
        struct sock *sk = NULL;
        struct unix_sock *u;

        atomic_inc(&unix_nr_socks);
        if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
                goto out;

        sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
        if (!sk)
                goto out;

        sock_init_data(sock, sk);
        lockdep_set_class(&sk->sk_receive_queue.lock,
                          &af_unix_sk_receive_queue_lock_key);

        sk->sk_write_space = unix_write_space;
        sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
        sk->sk_destruct = unix_sock_destructor;
        u = unix_sk(sk);
        u->dentry = NULL;
        u->mnt = NULL;
        spin_lock_init(&u->lock);
        atomic_long_set(&u->inflight, 0);
        INIT_LIST_HEAD(&u->link);
        mutex_init(&u->readlock); /* single task reading lock */
        init_waitqueue_head(&u->peer_wait);
        unix_insert_socket(unix_sockets_unbound, sk);
out:
        if (sk == NULL)
                atomic_dec(&unix_nr_socks);
        else {
                local_bh_disable();
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
                local_bh_enable();
        }
        return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
                       int kern)
{
        if (protocol && protocol != PF_UNIX)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        switch (sock->type) {
        case SOCK_STREAM:
                sock->ops = &unix_stream_ops;
                break;
                /*
                 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
                 * nothing uses it.
                 */
        case SOCK_RAW:
                sock->type = SOCK_DGRAM;
        case SOCK_DGRAM:
                sock->ops = &unix_dgram_ops;
                break;
        case SOCK_SEQPACKET:
                sock->ops = &unix_seqpacket_ops;
                break;
        default:
                return -ESOCKTNOSUPPORT;
        }

        return unix_create1(net, sock) ? 0 : -ENOMEM;
}

static int unix_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        if (!sk)
                return 0;

        sock->sk = NULL;

        return unix_release_sock(sk, 0);
}

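/*
 * Autobind gives an unbound socket an abstract name when it first needs
 * one. As the sprintf() below shows, the name is a leading zero byte
 * followed by five hex digits of a global counter - displayed as e.g.
 * "@0004d" by the /proc/net/unix code at the bottom of this file - and
 * the counter is advanced until an unused name is found.
 */
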
static int unix_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk);
        static u32 ordernum = 1;
        struct unix_address *addr;
        int err;
        unsigned int retries = 0;

        mutex_lock(&u->readlock);

        err = 0;
        if (u->addr)
                goto out;

        err = -ENOMEM;
        addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
        if (!addr)
                goto out;

        addr->name->sun_family = AF_UNIX;
        atomic_set(&addr->refcnt, 1);

retry:
        addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
        addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

        spin_lock(&unix_table_lock);
        ordernum = (ordernum+1)&0xFFFFF;

        if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
                                      addr->hash)) {
                spin_unlock(&unix_table_lock);
                /*
                 * __unix_find_socket_byname() may take a long time if many
                 * names are already in use.
                 */
                cond_resched();
                /* Give up if all names seem to be in use. */
                if (retries++ == 0xFFFFF) {
                        err = -ENOSPC;
                        kfree(addr);
                        goto out;
                }
                goto retry;
        }
        addr->hash ^= sk->sk_type;

        __unix_remove_socket(sk);
        u->addr = addr;
        __unix_insert_socket(&unix_socket_table[addr->hash], sk);
        spin_unlock(&unix_table_lock);
        err = 0;

out:    mutex_unlock(&u->readlock);
        return err;
}

static struct sock *unix_find_other(struct net *net,
                                    struct sockaddr_un *sunname, int len,
                                    int type, unsigned hash, int *error)
{
        struct sock *u;
        struct path path;
        int err = 0;

        if (sunname->sun_path[0]) {
                struct inode *inode;
                err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
                if (err)
                        goto fail;
                inode = path.dentry->d_inode;
                err = inode_permission(inode, MAY_WRITE);
                if (err)
                        goto put_fail;

                err = -ECONNREFUSED;
                if (!S_ISSOCK(inode->i_mode))
                        goto put_fail;
                u = unix_find_socket_byinode(net, inode);
                if (!u)
                        goto put_fail;

                if (u->sk_type == type)
                        touch_atime(path.mnt, path.dentry);

                path_put(&path);

                err = -EPROTOTYPE;
                if (u->sk_type != type) {
                        sock_put(u);
                        goto fail;
                }
        } else {
                err = -ECONNREFUSED;
                u = unix_find_socket_byname(net, sunname, len, type, hash);
                if (u) {
                        struct dentry *dentry;
                        dentry = unix_sk(u)->dentry;
                        if (dentry)
                                touch_atime(unix_sk(u)->mnt, dentry);
                } else
                        goto fail;
        }
        return u;

put_fail:
        path_put(&path);
fail:
        *error = err;
        return NULL;
}

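/*
 * Binding to a filesystem name creates a socket inode with vfs_mknod()
 * and hashes the socket by that inode; binding to an abstract name only
 * inserts the socket into the name hash. A later connect() then finds
 * it via unix_find_other() above - by path lookup in the first case,
 * by name lookup in the second.
 */
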
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk);
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
        struct dentry *dentry = NULL;
        struct nameidata nd;
        int err;
        unsigned hash;
        struct unix_address *addr;
        struct hlist_head *list;

        err = -EINVAL;
        if (sunaddr->sun_family != AF_UNIX)
                goto out;

        if (addr_len == sizeof(short)) {
                err = unix_autobind(sock);
                goto out;
        }

        err = unix_mkname(sunaddr, addr_len, &hash);
        if (err < 0)
                goto out;
        addr_len = err;

        mutex_lock(&u->readlock);

        err = -EINVAL;
        if (u->addr)
                goto out_up;

        err = -ENOMEM;
        addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
        if (!addr)
                goto out_up;

        memcpy(addr->name, sunaddr, addr_len);
        addr->len = addr_len;
        addr->hash = hash ^ sk->sk_type;
        atomic_set(&addr->refcnt, 1);

        if (sunaddr->sun_path[0]) {
                unsigned int mode;
                err = 0;
                /*
                 * Get the parent directory, calculate the hash for last
                 * component.
                 */
                err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
                if (err)
                        goto out_mknod_parent;

                dentry = lookup_create(&nd, 0);
                err = PTR_ERR(dentry);
                if (IS_ERR(dentry))
                        goto out_mknod_unlock;

                /*
                 * All right, let's create it.
                 */
                mode = S_IFSOCK |
                       (SOCK_INODE(sock)->i_mode & ~current_umask());
                err = mnt_want_write(nd.path.mnt);
                if (err)
                        goto out_mknod_dput;
                err = security_path_mknod(&nd.path, dentry, mode, 0);
                if (err)
                        goto out_mknod_drop_write;
                err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
out_mknod_drop_write:
                mnt_drop_write(nd.path.mnt);
                if (err)
                        goto out_mknod_dput;
                mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
                dput(nd.path.dentry);
                nd.path.dentry = dentry;

                addr->hash = UNIX_HASH_SIZE;
        }

        spin_lock(&unix_table_lock);

        if (!sunaddr->sun_path[0]) {
                err = -EADDRINUSE;
                if (__unix_find_socket_byname(net, sunaddr, addr_len,
                                              sk->sk_type, hash)) {
                        unix_release_addr(addr);
                        goto out_unlock;
                }

                list = &unix_socket_table[addr->hash];
        } else {
                list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
                u->dentry = nd.path.dentry;
                u->mnt = nd.path.mnt;
        }

        err = 0;
        __unix_remove_socket(sk);
        u->addr = addr;
        __unix_insert_socket(list, sk);

out_unlock:
        spin_unlock(&unix_table_lock);
out_up:
        mutex_unlock(&u->readlock);
out:
        return err;

out_mknod_dput:
        dput(dentry);
out_mknod_unlock:
        mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
        path_put(&nd.path);
out_mknod_parent:
        if (err == -EEXIST)
                err = -EADDRINUSE;
        unix_release_addr(addr);
        goto out_up;
}

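/*
 * When two sockets must be locked at once, the one with the lower
 * address is taken first; this global ordering is what prevents an
 * ABBA deadlock between two tasks locking the same pair from opposite
 * ends. unix_state_lock_nested() tells lockdep that the second
 * acquisition is intentional.
 */
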
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
        if (unlikely(sk1 == sk2) || !sk2) {
                unix_state_lock(sk1);
                return;
        }
        if (sk1 < sk2) {
                unix_state_lock(sk1);
                unix_state_lock_nested(sk2);
        } else {
                unix_state_lock(sk2);
                unix_state_lock_nested(sk1);
        }
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
        if (unlikely(sk1 == sk2) || !sk2) {
                unix_state_unlock(sk1);
                return;
        }
        unix_state_unlock(sk1);
        unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
                              int alen, int flags)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
        struct sock *other;
        unsigned hash;
        int err;

        if (addr->sa_family != AF_UNSPEC) {
                err = unix_mkname(sunaddr, alen, &hash);
                if (err < 0)
                        goto out;
                alen = err;

                if (test_bit(SOCK_PASSCRED, &sock->flags) &&
                    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
                        goto out;

restart:
                other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
                if (!other)
                        goto out;

                unix_state_double_lock(sk, other);

                /* Apparently VFS overslept socket death. Retry. */
                if (sock_flag(other, SOCK_DEAD)) {
                        unix_state_double_unlock(sk, other);
                        sock_put(other);
                        goto restart;
                }

                err = -EPERM;
                if (!unix_may_send(sk, other))
                        goto out_unlock;

                err = security_unix_may_send(sk->sk_socket, other->sk_socket);
                if (err)
                        goto out_unlock;

        } else {
                /*
                 * 1003.1g breaking connected state with AF_UNSPEC
                 */
                other = NULL;
                unix_state_double_lock(sk, other);
        }

        /*
         * If it was connected, reconnect.
         */
        if (unix_peer(sk)) {
                struct sock *old_peer = unix_peer(sk);
                unix_peer(sk) = other;
                unix_state_double_unlock(sk, other);

                if (other != old_peer)
                        unix_dgram_disconnected(sk, old_peer);
                sock_put(old_peer);
        } else {
                unix_peer(sk) = other;
                unix_state_double_unlock(sk, other);
        }
        return 0;

out_unlock:
        unix_state_double_unlock(sk, other);
        sock_put(other);
out:
        return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
        struct unix_sock *u = unix_sk(other);
        int sched;
        DEFINE_WAIT(wait);

        prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

        sched = !sock_flag(other, SOCK_DEAD) &&
                !(other->sk_shutdown & RCV_SHUTDOWN) &&
                unix_recvq_full(other);

        unix_state_unlock(other);

        if (sched)
                timeo = schedule_timeout(timeo);

        finish_wait(&u->peer_wait, &wait);
        return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                               int addr_len, int flags)
{
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk), *newu, *otheru;
        struct sock *newsk = NULL;
        struct sock *other = NULL;
        struct sk_buff *skb = NULL;
        unsigned hash;
        int st;
        int err;
        long timeo;

        err = unix_mkname(sunaddr, addr_len, &hash);
        if (err < 0)
                goto out;
        addr_len = err;

        if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
            (err = unix_autobind(sock)) != 0)
                goto out;

        timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

        /* First of all allocate resources.
           If we were to allocate after the state is locked,
           we would have to recheck everything again in any case.
         */

        err = -ENOMEM;

        /* create new sock for complete connection */
        newsk = unix_create1(sock_net(sk), NULL);
        if (newsk == NULL)
                goto out;

        /* Allocate skb for sending to listening sock */
        skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
        if (skb == NULL)
                goto out;

restart:
        /* Find listening sock. */
        other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
        if (!other)
                goto out;

        /* Latch state of peer */
        unix_state_lock(other);

        /* Apparently VFS overslept socket death. Retry. */
        if (sock_flag(other, SOCK_DEAD)) {
                unix_state_unlock(other);
                sock_put(other);
                goto restart;
        }

        err = -ECONNREFUSED;
        if (other->sk_state != TCP_LISTEN)
                goto out_unlock;
        if (other->sk_shutdown & RCV_SHUTDOWN)
                goto out_unlock;

        if (unix_recvq_full(other)) {
                err = -EAGAIN;
                if (!timeo)
                        goto out_unlock;

                timeo = unix_wait_for_peer(other, timeo);

                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        goto out;
                sock_put(other);
                goto restart;
        }

        /* Latch our state.

           This is a tricky place. We need to grab our own state lock and
           cannot drop the lock on the peer. That is dangerous because
           deadlock is possible. Connect-to-self and simultaneous connect
           attempts are eliminated by checking socket state. other is
           TCP_LISTEN; if sk is TCP_LISTEN we check this before the attempt
           to grab the lock.

           Well, and we have to recheck the state after socket locked.
         */
        st = sk->sk_state;

        switch (st) {
        case TCP_CLOSE:
                /* This is ok... continue with connect */
                break;
        case TCP_ESTABLISHED:
                /* Socket is already connected */
                err = -EISCONN;
                goto out_unlock;
        default:
                err = -EINVAL;
                goto out_unlock;
        }

        unix_state_lock_nested(sk);

        if (sk->sk_state != st) {
                unix_state_unlock(sk);
                unix_state_unlock(other);
                sock_put(other);
                goto restart;
        }

        err = security_unix_stream_connect(sock, other->sk_socket, newsk);
        if (err) {
                unix_state_unlock(sk);
                goto out_unlock;
        }

        /* The way is open! Quickly set all the necessary fields... */

        sock_hold(sk);
        unix_peer(newsk) = sk;
        newsk->sk_state = TCP_ESTABLISHED;
        newsk->sk_type = sk->sk_type;
        newsk->sk_peercred.pid = task_tgid_vnr(current);
        current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
        newu = unix_sk(newsk);
        newsk->sk_wq = &newu->peer_wq;
        otheru = unix_sk(other);

        /* copy address information from listening to new sock */
        if (otheru->addr) {
                atomic_inc(&otheru->addr->refcnt);
                newu->addr = otheru->addr;
        }
        if (otheru->dentry) {
                newu->dentry = dget(otheru->dentry);
                newu->mnt = mntget(otheru->mnt);
        }

        /* Set credentials */
        sk->sk_peercred = other->sk_peercred;

        sock->state = SS_CONNECTED;
        sk->sk_state = TCP_ESTABLISHED;
        sock_hold(newsk);

        smp_mb__after_atomic_inc();     /* sock_hold() does an atomic_inc() */
        unix_peer(sk) = newsk;

        unix_state_unlock(sk);

        /* take the skb and send info to the listening sock */
        spin_lock(&other->sk_receive_queue.lock);
        __skb_queue_tail(&other->sk_receive_queue, skb);
        spin_unlock(&other->sk_receive_queue.lock);
        unix_state_unlock(other);
        other->sk_data_ready(other, 0);
        sock_put(other);
        return 0;

out_unlock:
        if (other)
                unix_state_unlock(other);

out:
        kfree_skb(skb);
        if (newsk)
                unix_release_sock(newsk, 0);
        if (other)
                sock_put(other);
        return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
        struct sock *ska = socka->sk, *skb = sockb->sk;

        /* Join our sockets back to back */
        sock_hold(ska);
        sock_hold(skb);
        unix_peer(ska) = skb;
        unix_peer(skb) = ska;
        ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current);
        current_euid_egid(&skb->sk_peercred.uid, &skb->sk_peercred.gid);
        ska->sk_peercred.uid = skb->sk_peercred.uid;
        ska->sk_peercred.gid = skb->sk_peercred.gid;

        if (ska->sk_type != SOCK_DGRAM) {
                ska->sk_state = TCP_ESTABLISHED;
                skb->sk_state = TCP_ESTABLISHED;
                socka->state = SS_CONNECTED;
                sockb->state = SS_CONNECTED;
        }
        return 0;
}

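/*
 * Userspace reaches this through socketpair(2); a minimal sketch
 * (illustrative only, not part of this file):
 *
 *	int fds[2];
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0) {
 *		// fds[0] and fds[1] are connected peers; neither end
 *		// has a name, so getpeername() yields a 0-length path.
 *	}
 */
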
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
        struct sock *sk = sock->sk;
        struct sock *tsk;
        struct sk_buff *skb;
        int err;

        err = -EOPNOTSUPP;
        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto out;

        err = -EINVAL;
        if (sk->sk_state != TCP_LISTEN)
                goto out;

        /* If socket state is TCP_LISTEN it cannot change (for now...),
         * so that no locks are necessary.
         */

        skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
        if (!skb) {
                /* This means receive shutdown. */
                if (err == 0)
                        err = -EINVAL;
                goto out;
        }

        tsk = skb->sk;
        skb_free_datagram(sk, skb);
        wake_up_interruptible(&unix_sk(sk)->peer_wait);

        /* attach accepted sock to socket */
        unix_state_lock(tsk);
        newsock->state = SS_CONNECTED;
        sock_graft(tsk, newsock);
        unix_state_unlock(tsk);
        return 0;

out:
        return err;
}

static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct unix_sock *u;
        DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
        int err = 0;

        if (peer) {
                sk = unix_peer_get(sk);

                err = -ENOTCONN;
                if (!sk)
                        goto out;
                err = 0;
        } else {
                sock_hold(sk);
        }

        u = unix_sk(sk);
        unix_state_lock(sk);
        if (!u->addr) {
                sunaddr->sun_family = AF_UNIX;
                sunaddr->sun_path[0] = 0;
                *uaddr_len = sizeof(short);
        } else {
                struct unix_address *addr = u->addr;

                *uaddr_len = addr->len;
                memcpy(sunaddr, addr->name, *uaddr_len);
        }
        unix_state_unlock(sk);
        sock_put(sk);
out:
        return err;
}

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
        int i;

        scm->fp = UNIXCB(skb).fp;
        UNIXCB(skb).fp = NULL;

        for (i = scm->fp->count-1; i >= 0; i--)
                unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
        struct scm_cookie scm;
        memset(&scm, 0, sizeof(scm));
        scm.pid = UNIXCB(skb).pid;
        scm.cred = UNIXCB(skb).cred;
        if (UNIXCB(skb).fp)
                unix_detach_fds(&scm, skb);

        /* Alas, it calls VFS */
        /* So fscking what? fput() had been SMP-safe since the last Summer */
        scm_destroy(&scm);
        sock_wfree(skb);
}

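/*
 * SCM_RIGHTS messages may themselves carry unix sockets that hold
 * further queued SCM_RIGHTS messages. Each socket records the deepest
 * such nesting seen on its receive queue (recursion_level), and
 * attaching fds that would exceed MAX_RECURSION_LEVEL fails with
 * -ETOOMANYREFS.
 */
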
#define MAX_RECURSION_LEVEL 4

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
        int i;
        unsigned char max_level = 0;
        int unix_sock_count = 0;

        for (i = scm->fp->count - 1; i >= 0; i--) {
                struct sock *sk = unix_get_socket(scm->fp->fp[i]);

                if (sk) {
                        unix_sock_count++;
                        max_level = max(max_level,
                                        unix_sk(sk)->recursion_level);
                }
        }
        if (unlikely(max_level > MAX_RECURSION_LEVEL))
                return -ETOOMANYREFS;

        /*
         * Need to duplicate file references for the sake of garbage
         * collection. Otherwise a socket in the fps might become a
         * candidate for GC while the skb is not yet queued.
         */
        UNIXCB(skb).fp = scm_fp_dup(scm->fp);
        if (!UNIXCB(skb).fp)
                return -ENOMEM;

        if (unix_sock_count) {
                for (i = scm->fp->count - 1; i >= 0; i--)
                        unix_inflight(scm->fp->fp[i]);
        }
        return max_level;
}

static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
        int err = 0;
        UNIXCB(skb).pid = get_pid(scm->pid);
        UNIXCB(skb).cred = get_cred(scm->cred);
        UNIXCB(skb).fp = NULL;
        if (scm->fp && send_fds)
                err = unix_attach_fds(scm, skb);

        skb->destructor = unix_destruct_scm;
        return err;
}

/*
 * Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
                              struct msghdr *msg, size_t len)
{
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk);
        struct sockaddr_un *sunaddr = msg->msg_name;
        struct sock *other = NULL;
        int namelen = 0; /* fake GCC */
        int err;
        unsigned hash;
        struct sk_buff *skb;
        long timeo;
        struct scm_cookie tmp_scm;
        int max_level;

        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
        wait_for_unix_gc();
        err = scm_send(sock, msg, siocb->scm);
        if (err < 0)
                return err;

        err = -EOPNOTSUPP;
        if (msg->msg_flags&MSG_OOB)
                goto out;

        if (msg->msg_namelen) {
                err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
                if (err < 0)
                        goto out;
                namelen = err;
        } else {
                sunaddr = NULL;
                err = -ENOTCONN;
                other = unix_peer_get(sk);
                if (!other)
                        goto out;
        }

        if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
            && (err = unix_autobind(sock)) != 0)
                goto out;

        err = -EMSGSIZE;
        if (len > sk->sk_sndbuf - 32)
                goto out;

        skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
        if (skb == NULL)
                goto out;

        err = unix_scm_to_skb(siocb->scm, skb, true);
        if (err < 0)
                goto out_free;
        max_level = err + 1;
        unix_get_secdata(siocb->scm, skb);

        skb_reset_transport_header(skb);
        err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (err)
                goto out_free;

        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
        if (!other) {
                err = -ECONNRESET;
                if (sunaddr == NULL)
                        goto out_free;

                other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
                                        hash, &err);
                if (other == NULL)
                        goto out_free;
        }

        unix_state_lock(other);
        err = -EPERM;
        if (!unix_may_send(sk, other))
                goto out_unlock;

        if (sock_flag(other, SOCK_DEAD)) {
                /*
                 * Check with 1003.1g - what should a datagram error
                 * return here?
                 */
                unix_state_unlock(other);
                sock_put(other);

                err = 0;
                unix_state_lock(sk);
                if (unix_peer(sk) == other) {
                        unix_peer(sk) = NULL;
                        unix_state_unlock(sk);

                        unix_dgram_disconnected(sk, other);
                        sock_put(other);
                        err = -ECONNREFUSED;
                } else {
                        unix_state_unlock(sk);
                }

                other = NULL;
                if (err)
                        goto out_free;
                goto restart;
        }

        err = -EPIPE;
        if (other->sk_shutdown & RCV_SHUTDOWN)
                goto out_unlock;

        if (sk->sk_type != SOCK_SEQPACKET) {
                err = security_unix_may_send(sk->sk_socket, other->sk_socket);
                if (err)
                        goto out_unlock;
        }

        if (unix_peer(other) != sk && unix_recvq_full(other)) {
                if (!timeo) {
                        err = -EAGAIN;
                        goto out_unlock;
                }

                timeo = unix_wait_for_peer(other, timeo);

                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        goto out_free;

                goto restart;
        }

        skb_queue_tail(&other->sk_receive_queue, skb);
        if (max_level > unix_sk(other)->recursion_level)
                unix_sk(other)->recursion_level = max_level;
        unix_state_unlock(other);
        other->sk_data_ready(other, len);
        sock_put(other);
        scm_destroy(siocb->scm);
        return len;

out_unlock:
        unix_state_unlock(other);
out_free:
        kfree_skb(skb);
out:
        if (other)
                sock_put(other);
        scm_destroy(siocb->scm);
        return err;
}

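/*
 * The fd-passing path above is driven from userspace via sendmsg() with
 * SCM_RIGHTS ancillary data. A minimal sketch (illustrative only;
 * sock_fd and fd_to_pass are placeholder names):
 *
 *	char buf[CMSG_SPACE(sizeof(int))];
 *	struct msghdr msg = { .msg_control = buf,
 *			      .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type  = SCM_RIGHTS;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *	sendmsg(sock_fd, &msg, 0);
 *
 * scm_send() turns the cmsg into the scm_cookie that unix_scm_to_skb()
 * attaches to the outgoing skb.
 */
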
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                               struct msghdr *msg, size_t len)
{
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct sock *other = NULL;
        struct sockaddr_un *sunaddr = msg->msg_name;
        int err, size;
        struct sk_buff *skb;
        int sent = 0;
        struct scm_cookie tmp_scm;
        bool fds_sent = false;
        int max_level;

        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
        wait_for_unix_gc();
        err = scm_send(sock, msg, siocb->scm);
        if (err < 0)
                return err;

        err = -EOPNOTSUPP;
        if (msg->msg_flags&MSG_OOB)
                goto out_err;

        if (msg->msg_namelen) {
                err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
                goto out_err;
        } else {
                sunaddr = NULL;
                err = -ENOTCONN;
                other = unix_peer(sk);
                if (!other)
                        goto out_err;
        }

        if (sk->sk_shutdown & SEND_SHUTDOWN)
                goto pipe_err;

        while (sent < len) {
                /*
                 * Optimisation for the fact that under 0.01% of X
                 * messages typically need breaking up.
                 */

                size = len-sent;

                /* Keep two messages in the pipe so it schedules better */
                if (size > ((sk->sk_sndbuf >> 1) - 64))
                        size = (sk->sk_sndbuf >> 1) - 64;

                if (size > SKB_MAX_ALLOC)
                        size = SKB_MAX_ALLOC;

                /*
                 * Grab a buffer
                 */

                skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
                                          &err);

                if (skb == NULL)
                        goto out_err;

                /*
                 * If you pass two values to the sock_alloc_send_skb
                 * it tries to grab the large buffer with GFP_NOFS
                 * (which can fail easily), and if it fails grab the
                 * fallback size buffer which is under a page and will
                 * succeed. [Alan]
                 */
                size = min_t(int, size, skb_tailroom(skb));

                /* Only send the fds in the first buffer */
                err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
                if (err < 0) {
                        kfree_skb(skb);
                        goto out_err;
                }
                max_level = err + 1;
                fds_sent = true;

                err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
                if (err) {
                        kfree_skb(skb);
                        goto out_err;
                }

                unix_state_lock(other);

                if (sock_flag(other, SOCK_DEAD) ||
                    (other->sk_shutdown & RCV_SHUTDOWN))
                        goto pipe_err_free;

                skb_queue_tail(&other->sk_receive_queue, skb);
                if (max_level > unix_sk(other)->recursion_level)
                        unix_sk(other)->recursion_level = max_level;
                unix_state_unlock(other);
                other->sk_data_ready(other, size);
                sent += size;
        }

        scm_destroy(siocb->scm);
        siocb->scm = NULL;

        return sent;

pipe_err_free:
        unix_state_unlock(other);
        kfree_skb(skb);
pipe_err:
        if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
                send_sig(SIGPIPE, current, 0);
        err = -EPIPE;
out_err:
        scm_destroy(siocb->scm);
        siocb->scm = NULL;
        return sent ? : err;
}

static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
                                  struct msghdr *msg, size_t len)
{
        int err;
        struct sock *sk = sock->sk;

        err = sock_error(sk);
        if (err)
                return err;

        if (sk->sk_state != TCP_ESTABLISHED)
                return -ENOTCONN;

        if (msg->msg_namelen)
                msg->msg_namelen = 0;

        return unix_dgram_sendmsg(kiocb, sock, msg, len);
}

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
        struct unix_sock *u = unix_sk(sk);

        msg->msg_namelen = 0;
        if (u->addr) {
                msg->msg_namelen = u->addr->len;
                memcpy(msg->msg_name, u->addr->name, u->addr->len);
        }
}

static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
                              struct msghdr *msg, size_t size,
                              int flags)
{
        struct sock_iocb *siocb = kiocb_to_siocb(iocb);
        struct scm_cookie tmp_scm;
        struct sock *sk = sock->sk;
        struct unix_sock *u = unix_sk(sk);
        int noblock = flags & MSG_DONTWAIT;
        struct sk_buff *skb;
        int err;

        err = -EOPNOTSUPP;
        if (flags&MSG_OOB)
                goto out;

        msg->msg_namelen = 0;

        mutex_lock(&u->readlock);

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                unix_state_lock(sk);
                /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
                if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
                    (sk->sk_shutdown & RCV_SHUTDOWN))
                        err = 0;
                unix_state_unlock(sk);
                goto out_unlock;
        }

        wake_up_interruptible_sync(&u->peer_wait);

        if (msg->msg_name)
                unix_copy_addr(msg, skb->sk);

        if (size > skb->len)
                size = skb->len;
        else if (size < skb->len)
                msg->msg_flags |= MSG_TRUNC;

        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
        if (err)
                goto out_free;

        if (!siocb->scm) {
                siocb->scm = &tmp_scm;
                memset(&tmp_scm, 0, sizeof(tmp_scm));
        }
        scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
        unix_set_secdata(siocb->scm, skb);

        if (!(flags & MSG_PEEK)) {
                if (UNIXCB(skb).fp)
                        unix_detach_fds(siocb->scm, skb);
        } else {
                /* It is questionable: on PEEK we could:
                   - do not return fds - good, but too simple 8)
                   - return fds, and do not return them on read (old strategy,
                     apparently wrong)
                   - clone fds (I chose it for now, it is the most universal
                     solution)

                   POSIX 1003.1g does not actually define this clearly
                   at all. POSIX 1003.1g doesn't define a lot of things
                   clearly however!
                */
                if (UNIXCB(skb).fp)
                        siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
        }
        err = size;

        scm_recv(sock, msg, siocb->scm, flags);

out_free:
        skb_free_datagram(sk, skb);
out_unlock:
        mutex_unlock(&u->readlock);
out:
        return err;
}

/*
 * Sleep until data has arrived. But check for races..
 */

static long unix_stream_data_wait(struct sock *sk, long timeo)
{
        DEFINE_WAIT(wait);

        unix_state_lock(sk);

        for (;;) {
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

                if (!skb_queue_empty(&sk->sk_receive_queue) ||
                    sk->sk_err ||
                    (sk->sk_shutdown & RCV_SHUTDOWN) ||
                    signal_pending(current) ||
                    !timeo)
                        break;

                set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
                unix_state_unlock(sk);
                timeo = schedule_timeout(timeo);
                unix_state_lock(sk);
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }

        finish_wait(sk_sleep(sk), &wait);
        unix_state_unlock(sk);
        return timeo;
}

static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                               struct msghdr *msg, size_t size,
                               int flags)
{
        struct sock_iocb *siocb = kiocb_to_siocb(iocb);
        struct scm_cookie tmp_scm;
        struct sock *sk = sock->sk;
        struct unix_sock *u = unix_sk(sk);
        struct sockaddr_un *sunaddr = msg->msg_name;
        int copied = 0;
        int check_creds = 0;
        int target;
        int err = 0;
        long timeo;

        err = -EINVAL;
        if (sk->sk_state != TCP_ESTABLISHED)
                goto out;

        err = -EOPNOTSUPP;
        if (flags&MSG_OOB)
                goto out;

        target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
        timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

        msg->msg_namelen = 0;

        /* Lock the socket to prevent queue disordering
         * while we sleep in memcpy_tomsg
         */

        if (!siocb->scm) {
                siocb->scm = &tmp_scm;
                memset(&tmp_scm, 0, sizeof(tmp_scm));
        }

        mutex_lock(&u->readlock);

        do {
                int chunk;
                struct sk_buff *skb;

                unix_state_lock(sk);
                skb = skb_dequeue(&sk->sk_receive_queue);
                if (skb == NULL) {
                        unix_sk(sk)->recursion_level = 0;
                        if (copied >= target)
                                goto unlock;

                        /*
                         * POSIX 1003.1g mandates this order.
                         */

                        err = sock_error(sk);
                        if (err)
                                goto unlock;
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                goto unlock;

                        unix_state_unlock(sk);
                        err = -EAGAIN;
                        if (!timeo)
                                break;
                        mutex_unlock(&u->readlock);

                        timeo = unix_stream_data_wait(sk, timeo);

                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeo);
                                goto out;
                        }
                        mutex_lock(&u->readlock);
                        continue;
unlock:
                        unix_state_unlock(sk);
                        break;
                }
                unix_state_unlock(sk);

                if (check_creds) {
                        /* Never glue messages from different writers */
                        if ((UNIXCB(skb).pid != siocb->scm->pid) ||
                            (UNIXCB(skb).cred != siocb->scm->cred)) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
                                break;
                        }
                } else {
                        /* Copy credentials */
                        scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
                        check_creds = 1;
                }

                /* Copy address just once */
                if (sunaddr) {
                        unix_copy_addr(msg, skb->sk);
                        sunaddr = NULL;
                }

                chunk = min_t(unsigned int, skb->len, size);
                if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        if (copied == 0)
                                copied = -EFAULT;
                        break;
                }
                copied += chunk;
                size -= chunk;

                /* Mark read part of skb as used */
                if (!(flags & MSG_PEEK)) {
                        skb_pull(skb, chunk);

                        if (UNIXCB(skb).fp)
                                unix_detach_fds(siocb->scm, skb);

                        /* put the skb back if we didn't use it up.. */
                        if (skb->len) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
                                break;
                        }

                        kfree_skb(skb);

                        if (siocb->scm->fp)
                                break;
                } else {
                        /* It is questionable, see note in unix_dgram_recvmsg.
                         */
                        if (UNIXCB(skb).fp)
                                siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

                        /* put message back and return */
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        break;
                }
        } while (size);

        mutex_unlock(&u->readlock);
        scm_recv(sock, msg, siocb->scm, flags);
out:
        return copied ? : err;
}

static int unix_shutdown(struct socket *sock, int mode)
{
        struct sock *sk = sock->sk;
        struct sock *other;

        mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);

        if (mode) {
                unix_state_lock(sk);
                sk->sk_shutdown |= mode;
                other = unix_peer(sk);
                if (other)
                        sock_hold(other);
                unix_state_unlock(sk);
                sk->sk_state_change(sk);

                if (other &&
                    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

                        int peer_mode = 0;

                        if (mode&RCV_SHUTDOWN)
                                peer_mode |= SEND_SHUTDOWN;
                        if (mode&SEND_SHUTDOWN)
                                peer_mode |= RCV_SHUTDOWN;
                        unix_state_lock(other);
                        other->sk_shutdown |= peer_mode;
                        unix_state_unlock(other);
                        other->sk_state_change(other);
                        if (peer_mode == SHUTDOWN_MASK)
                                sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
                        else if (peer_mode & RCV_SHUTDOWN)
                                sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
                }
                if (other)
                        sock_put(other);
        }
        return 0;
}

static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        struct sock *sk = sock->sk;
        long amount = 0;
        int err;

        switch (cmd) {
        case SIOCOUTQ:
                amount = sk_wmem_alloc_get(sk);
                err = put_user(amount, (int __user *)arg);
                break;
        case SIOCINQ:
                {
                        struct sk_buff *skb;

                        if (sk->sk_state == TCP_LISTEN) {
                                err = -EINVAL;
                                break;
                        }

                        spin_lock(&sk->sk_receive_queue.lock);
                        if (sk->sk_type == SOCK_STREAM ||
                            sk->sk_type == SOCK_SEQPACKET) {
                                skb_queue_walk(&sk->sk_receive_queue, skb)
                                        amount += skb->len;
                        } else {
                                skb = skb_peek(&sk->sk_receive_queue);
                                if (skb)
                                        amount = skb->len;
                        }
                        spin_unlock(&sk->sk_receive_queue.lock);
                        err = put_user(amount, (int __user *)arg);
                        break;
                }

        default:
                err = -ENOIOCTLCMD;
                break;
        }
        return err;
}

static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        /* exceptional events? */
        if (sk->sk_err)
                mask |= POLLERR;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;

        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        /* Connection-based need to check for termination and startup */
        if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
            sk->sk_state == TCP_CLOSE)
                mask |= POLLHUP;

        /*
         * We set writable also when the other side has shut down the
         * connection. This prevents stuck sockets.
         */
        if (unix_writable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}

static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
                                    poll_table *wait)
{
        struct sock *sk = sock->sk, *other;
        unsigned int mask, writable;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        /* Connection-based need to check for termination and startup */
        if (sk->sk_type == SOCK_SEQPACKET) {
                if (sk->sk_state == TCP_CLOSE)
                        mask |= POLLHUP;
                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)
                        return mask;
        }

        /* writable? */
        writable = unix_writable(sk);
        if (writable) {
                other = unix_peer_get(sk);
                if (other) {
                        if (unix_peer(other) != sk) {
                                sock_poll_wait(file, &unix_sk(other)->peer_wait,
                                               wait);
                                if (unix_recvq_full(other))
                                        writable = 0;
                        }

                        sock_put(other);
                }
        }

        if (writable)
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}

#ifdef CONFIG_PROC_FS
static struct sock *first_unix_socket(int *i)
{
        for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
                if (!hlist_empty(&unix_socket_table[*i]))
                        return __sk_head(&unix_socket_table[*i]);
        }
        return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
        struct sock *next = sk_next(s);
        /* More in this chain? */
        if (next)
                return next;
        /* Look for next non-empty chain. */
        for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
                if (!hlist_empty(&unix_socket_table[*i]))
                        return __sk_head(&unix_socket_table[*i]);
        }
        return NULL;
}

struct unix_iter_state {
        struct seq_net_private p;
        int i;
};

static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
{
        struct unix_iter_state *iter = seq->private;
        loff_t off = 0;
        struct sock *s;

        for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
                if (sock_net(s) != seq_file_net(seq))
                        continue;
                if (off == pos)
                        return s;
                ++off;
        }
        return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(unix_table_lock)
{
        spin_lock(&unix_table_lock);
        return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct unix_iter_state *iter = seq->private;
        struct sock *sk = v;
        ++*pos;

        if (v == SEQ_START_TOKEN)
                sk = first_unix_socket(&iter->i);
        else
                sk = next_unix_socket(&iter->i, sk);
        while (sk && (sock_net(sk) != seq_file_net(seq)))
                sk = next_unix_socket(&iter->i, sk);
        return sk;
}

static void unix_seq_stop(struct seq_file *seq, void *v)
        __releases(unix_table_lock)
{
        spin_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{

        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
                         "Inode Path\n");
        else {
                struct sock *s = v;
                struct unix_sock *u = unix_sk(s);
                unix_state_lock(s);

                seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
                        s,
                        atomic_read(&s->sk_refcnt),
                        0,
                        s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
                        s->sk_type,
                        s->sk_socket ?
                        (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
                        (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
                        sock_i_ino(s));

                if (u->addr) {
                        int i, len;
                        seq_putc(seq, ' ');

                        i = 0;
                        len = u->addr->len - sizeof(short);
                        if (!UNIX_ABSTRACT(s))
                                len--;
                        else {
                                seq_putc(seq, '@');
                                i++;
                        }
                        for ( ; i < len; i++)
                                seq_putc(seq, u->addr->name->sun_path[i]);
                }
                unix_state_unlock(s);
                seq_putc(seq, '\n');
        }

        return 0;
}

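/*
 * Example of the resulting /proc/net/unix line for a listening,
 * filesystem-bound stream socket (illustrative values):
 *
 *	Num       RefCount Protocol Flags    Type St Inode Path
 *	f5e36b80: 00000002 00000000 00010000 0001 01 12345 /var/run/example.sock
 *
 * Flags shows __SO_ACCEPTCON for listeners, Type is the socket type
 * (0001 = SOCK_STREAM) and St the SS_* socket state; abstract names
 * are printed with a leading '@'.
 */
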
static const struct seq_operations unix_seq_ops = {
        .start = unix_seq_start,
        .next = unix_seq_next,
        .stop = unix_seq_stop,
        .show = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &unix_seq_ops,
                            sizeof(struct unix_iter_state));
}

static const struct file_operations unix_seq_fops = {
        .owner = THIS_MODULE,
        .open = unix_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};

#endif

static const struct net_proto_family unix_family_ops = {
        .family = PF_UNIX,
        .create = unix_create,
        .owner = THIS_MODULE,
};

static int __net_init unix_net_init(struct net *net)
{
        int error = -ENOMEM;

        net->unx.sysctl_max_dgram_qlen = 10;
        if (unix_sysctl_register(net))
                goto out;

#ifdef CONFIG_PROC_FS
        if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
                unix_sysctl_unregister(net);
                goto out;
        }
#endif
        error = 0;
out:
        return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
        unix_sysctl_unregister(net);
        proc_net_remove(net, "unix");
}

static struct pernet_operations unix_net_ops = {
        .init = unix_net_init,
        .exit = unix_net_exit,
};

static int __init af_unix_init(void)
{
        int rc = -1;
        struct sk_buff *dummy_skb;

        BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

        rc = proto_register(&unix_proto, 1);
        if (rc != 0) {
                printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
                       __func__);
                goto out;
        }

        sock_register(&unix_family_ops);
        register_pernet_subsys(&unix_net_ops);
out:
        return rc;
}

static void __exit af_unix_exit(void)
{
        sock_unregister(PF_UNIX);
        proto_unregister(&unix_proto);
        unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */

fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);