/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket
 * problem. If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would happen
 *		on each unix_gc() until the accept(). Since the struct file
 *		in question would go to the free list and might be reused...
 *		That might be the reason for random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi	25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
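/*
 * Why a garbage collector at all: an AF_UNIX descriptor can itself be
 * sent over an AF_UNIX socket in an SCM_RIGHTS message, so a struct
 * file may end up referenced only by an skb sitting "in flight" on
 * some receive queue. Such references can form cycles that plain
 * reference counting never frees. An illustrative userspace sketch
 * (not part of this file; send_fd() stands for a hypothetical helper
 * wrapping sendmsg() with an SCM_RIGHTS control message):
 *
 *	int sv[2];
 *
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	send_fd(sv[1], sv[0]);	// sv[0]'s fd lands on sv[0]'s own queue
 *	close(sv[0]);
 *	close(sv[1]);		// no fd left, yet the file stays alive
 *
 * After both close() calls nobody can ever receive that message, but
 * the skb queued on sv[0] still holds a reference to sv[0]'s file: a
 * cycle that only unix_gc() below can reclaim.
 */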
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>
/* Internal data structures and random procedures: */

static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

atomic_t unix_tot_inflight = ATOMIC_INIT(0);
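/*
 * Note on the bookkeeping above: gc_inflight_list links each AF_UNIX
 * socket whose descriptor is currently in flight at least once, and
 * unix_tot_inflight counts all in-flight descriptors system-wide.
 * Both are maintained under unix_gc_lock by unix_inflight() and
 * unix_notinflight() below.
 */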
static struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = filp->f_path.dentry->d_inode;

	/*
	 * Is it a socket?
	 */
	if (S_ISSOCK(inode->i_mode)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/*
		 * Is it a PF_UNIX socket?
		 */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}
/*
 * Keep the per-socket count of how many times its file descriptor is
 * currently in flight, if it is an AF_UNIX socket.
 */
void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		spin_lock(&unix_gc_lock);
		if (atomic_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		atomic_inc(&unix_tot_inflight);
		spin_unlock(&unix_gc_lock);
	}
}
void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		spin_lock(&unix_gc_lock);
		BUG_ON(list_empty(&u->link));
		if (atomic_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		atomic_dec(&unix_tot_inflight);
		spin_unlock(&unix_gc_lock);
	}
}
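/*
 * unix_inflight() and unix_notinflight() are expected to be called in
 * matched pairs: once per descriptor when an SCM_RIGHTS message is
 * attached to an skb, and once per descriptor when it is detached
 * again (on delivery or destruction); the in-tree callers are the
 * SCM_RIGHTS attach/detach paths in af_unix.c. An illustrative sketch
 * of the attach-side pattern (not the actual af_unix.c code), assuming
 * a struct scm_fp_list like the one behind UNIXCB(skb).fp:
 *
 *	static void attach_fds_sketch(struct scm_fp_list *fpl)
 *	{
 *		int i;
 *
 *		for (i = 0; i < fpl->count; i++)
 *			unix_inflight(fpl->fp[i]);	// one count per fd
 *	}
 */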
static inline struct sk_buff *sock_queue_head(struct sock *sk)
{
	return (struct sk_buff *)&sk->sk_receive_queue;
}

#define receive_queue_for_each_skb(sk, next, skb) \
	for (skb = sock_queue_head(sk)->next, next = skb->next; \
	     skb != sock_queue_head(sk); skb = next, next = skb->next)
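/*
 * Note that receive_queue_for_each_skb() reads the next pointer before
 * the loop body runs, so the body may safely __skb_unlink() the
 * current skb, as scan_inflight() below does when filling a hitlist.
 * The queue head itself, returned by sock_queue_head(), serves as the
 * sentinel that terminates the walk.
 */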
static void scan_inflight(struct sock *x, void (*func)(struct sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	receive_queue_for_each_skb(x, next, skb) {
		/*
		 * Does this skb carry file descriptors?
		 */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/*
			 * Process the descriptors of this socket.
			 */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/*
				 * Get the socket the fd matches,
				 * if it indeed matches one.
				 */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/*
					 * Ignore non-candidates; they could
					 * have been added to the queues after
					 * the garbage collection started.
					 */
					if (u->gc_candidate) {
						hit = true;
						func(sk);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}
static void scan_children(struct sock *x, void (*func)(struct sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN)
		scan_inflight(x, func, hitlist);
	else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/*
		 * For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		receive_queue_for_each_skb(x, next, skb) {
			u = unix_sk(skb->sk);

			/*
			 * An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}
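/*
 * scan_children() is used by the collector in two modes: with a NULL
 * hitlist it only applies func to every candidate child (the counting
 * passes), while with a real hitlist scan_inflight() also unlinks the
 * skbs whose descriptors reference candidates, queueing them for
 * purging. Illustrative calls, mirroring the ones in unix_gc() below:
 *
 *	scan_children(&u->sk, dec_inflight, NULL);	// counters only
 *	scan_children(&u->sk, inc_inflight, &hitlist);	// also collect skbs
 */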
static void dec_inflight(struct sock *sk)
{
	atomic_dec(&unix_sk(sk)->inflight);
}

static void inc_inflight(struct sock *sk)
{
	atomic_inc(&unix_sk(sk)->inflight);
}

static void inc_inflight_move_tail(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	atomic_inc(&u->inflight);
	/*
	 * If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over.
	 */
	if (u->gc_maybe_cycle)
		list_move_tail(&u->link, &gc_candidates);
}
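/*
 * The list_move_tail() in inc_inflight_move_tail() is what drives the
 * restore loop in unix_gc() to a fixpoint. For example, with candidate
 * sockets A -> B -> C, each holding the next one in flight: if A is
 * found to have an external reference, B's counter is restored and B
 * is requeued at the tail, so the cursor reaches B again after the
 * bump; B then restores C the same way, until only genuine cycles are
 * left with a zero count.
 */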
static bool gc_in_progress = false;

void wait_for_unix_gc(void)
{
	wait_event(unix_gc_wait, gc_in_progress == false);
}
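/*
 * wait_for_unix_gc() lets a caller sleep until the collector is idle;
 * unix_gc() clears gc_in_progress and wakes unix_gc_wait when it is
 * done. A minimal usage sketch (from any context that may sleep):
 *
 *	wait_for_unix_gc();
 *	// any collection that was running has now finished, though a
 *	// new one may of course start at any time afterwards
 */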
/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/*
	 * First, select candidates for garbage collection. Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference. Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues. Other, non-candidate sockets _can_ be
	 * added to the queues, so we must make sure we only touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		int total_refs;
		int inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			u->gc_candidate = 1;
			u->gc_maybe_cycle = 1;
		}
	}
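	/*
	 * A worked example of the test above: for a socket whose only
	 * descriptor was passed in an SCM_RIGHTS message and then
	 * closed, file_count() is 1 and so is its inflight count, so
	 * total_refs == inflight_refs and it becomes a candidate. A
	 * socket that some process still holds open has
	 * total_refs > inflight_refs and is skipped.
	 */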
	/*
	 * Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/*
	 * Restore the references for those children of the candidates
	 * which still have remaining references. Do this recursively,
	 * so that only the sockets forming cyclic references remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			u->gc_maybe_cycle = 0;
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/*
	 * not_cycle_list contains those sockets which do not make up a
	 * cycle. Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		u->gc_candidate = 0;
		list_move_tail(&u->link, &gc_inflight_list);
	}

	/*
	 * Now gc_candidates contains only garbage. Restore the original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

out:
	spin_unlock(&unix_gc_lock);
}