/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi	25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
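/*
 * For illustration only (not part of this file's logic): a minimal
 * userspace sketch of how a descriptor cycle can arise.  Each end of a
 * socketpair is sent over the other end via SCM_RIGHTS, then both
 * descriptors are closed, so the two sockets keep each other alive
 * purely through in-flight references and only the collector below can
 * reclaim them.  Error handling and includes are omitted.
 *
 *	int sv[2];
 *	char dummy = 0;
 *	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type  = SCM_RIGHTS;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
 *
 *	memcpy(CMSG_DATA(cmsg), &sv[1], sizeof(int));
 *	sendmsg(sv[0], &msg, 0);	(sv[1] now in flight on sv[0])
 *	memcpy(CMSG_DATA(cmsg), &sv[0], sizeof(int));
 *	sendmsg(sv[1], &msg, 0);	(sv[0] now in flight on sv[1])
 *
 *	close(sv[0]);
 *	close(sv[1]);	(only in-flight references remain: a cycle)
 */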
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>
/* Internal data structures and random procedures: */
static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;
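/*
 * Locking and invariants, summarized: gc_inflight_list holds every
 * unix_sock whose inflight count is non-zero, gc_candidates is only
 * populated while a collection is running, and both lists as well as
 * unix_tot_inflight are protected by unix_gc_lock.
 */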
struct sock *unix_get_socket(struct file *filp)
{
        struct sock *u_sock = NULL;
        struct inode *inode = file_inode(filp);

        /* Is it a socket, and not opened with O_PATH? */
        if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
                struct socket *sock = SOCKET_I(inode);
                struct sock *s = sock->sk;

                /* Is it an AF_UNIX socket? */
                if (s && sock->ops && sock->ops->family == PF_UNIX)
                        u_sock = s;
        }
        return u_sock;
}
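/*
 * Note on the FMODE_PATH check above: a descriptor obtained with
 * O_PATH cannot be used for socket I/O, so it is deliberately not
 * treated as a socket reference for garbage collection purposes.
 */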
/*
 * Keep the number of times in flight count for the file
 * descriptor if it is for an AF_UNIX socket.
 */
void unix_inflight(struct file *fp)
{
        struct sock *s = unix_get_socket(fp);

        if (s) {
                struct unix_sock *u = unix_sk(s);

                spin_lock(&unix_gc_lock);

                if (atomic_long_inc_return(&u->inflight) == 1) {
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &gc_inflight_list);
                } else {
                        BUG_ON(list_empty(&u->link));
                }
                unix_tot_inflight++;
                spin_unlock(&unix_gc_lock);
        }
}
void unix_notinflight(struct file *fp)
{
        struct sock *s = unix_get_socket(fp);

        if (s) {
                struct unix_sock *u = unix_sk(s);

                spin_lock(&unix_gc_lock);
                BUG_ON(list_empty(&u->link));

                if (atomic_long_dec_and_test(&u->inflight))
                        list_del_init(&u->link);
                unix_tot_inflight--;
                spin_unlock(&unix_gc_lock);
        }
}
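/*
 * unix_inflight() and unix_notinflight() are expected to be called in
 * matched pairs by the SCM_RIGHTS attach and detach paths: u->inflight
 * counts the queued copies of a socket's file, and the socket sits on
 * gc_inflight_list exactly while that count is non-zero.
 */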
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        struct sk_buff *skb;
        struct sk_buff *next;

        spin_lock(&x->sk_receive_queue.lock);
        skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                /* Do we have file descriptors ? */
                if (UNIXCB(skb).fp) {
                        bool hit = false;

                        /* Process the descriptors of this socket */
                        int nfd = UNIXCB(skb).fp->count;
                        struct file **fp = UNIXCB(skb).fp->fp;

                        while (nfd--) {
                                /*
                                 * Get the socket the fd matches
                                 * if it indeed does so
                                 */
                                struct sock *sk = unix_get_socket(*fp++);

                                if (sk) {
                                        struct unix_sock *u = unix_sk(sk);

                                        /*
                                         * Ignore non-candidates, they could
                                         * have been added to the queues after
                                         * starting the garbage collection
                                         */
                                        if (u->gc_candidate) {
                                                hit = true;
                                                func(u);
                                        }
                                }
                        }
                        if (hit && hitlist != NULL) {
                                __skb_unlink(skb, &x->sk_receive_queue);
                                __skb_queue_tail(hitlist, skb);
                        }
                }
        }
        spin_unlock(&x->sk_receive_queue.lock);
}
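/*
 * scan_inflight() runs with the receive queue lock held, so the func
 * callbacks passed to it (the inflight counter adjustments below) must
 * not sleep.
 */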
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        if (x->sk_state != TCP_LISTEN)
                scan_inflight(x, func, hitlist);
        else {
                struct sk_buff *skb;
                struct sk_buff *next;
                struct unix_sock *u;
                LIST_HEAD(embryos);

                /*
                 * For a listening socket collect the queued embryos
                 * and perform a scan on them as well.
                 */
                spin_lock(&x->sk_receive_queue.lock);
                skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                        u = unix_sk(skb->sk);

                        /*
                         * An embryo cannot be in-flight, so it's safe
                         * to use the list link.
                         */
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &embryos);
                }
                spin_unlock(&x->sk_receive_queue.lock);

                while (!list_empty(&embryos)) {
                        u = list_entry(embryos.next, struct unix_sock, link);
                        scan_inflight(&u->sk, func, hitlist);
                        list_del_init(&u->link);
                }
        }
}
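/*
 * "Embryos" are the not-yet-accept()ed sockets sitting on a listening
 * socket's receive queue; their own queues can already hold SCM_RIGHTS
 * skbs, which is why scan_children() descends into them.
 */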
static void dec_inflight(struct unix_sock *usk)
{
        atomic_long_dec(&usk->inflight);
}
static void inc_inflight(struct unix_sock *usk)
{
        atomic_long_inc(&usk->inflight);
}
static void inc_inflight_move_tail(struct unix_sock *u)
{
        atomic_long_inc(&u->inflight);
        /*
         * If this still might be part of a cycle, move it to the end
         * of the list, so that it's checked even if it was already
         * passed over.
         */
        if (u->gc_maybe_cycle)
                list_move_tail(&u->link, &gc_candidates);
}
static bool gc_in_progress = false;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
        /*
         * If number of inflight sockets is insane,
         * force a garbage collect right now.
         */
        if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
                unix_gc();
        wait_event(unix_gc_wait, gc_in_progress == false);
}
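/*
 * The trigger above is meant to bound how much kernel memory
 * queued-but-unread SCM_RIGHTS descriptors can pin: once more than
 * UNIX_INFLIGHT_TRIGGER_GC files are in flight, a sender performs a
 * collection itself before queueing more.
 */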
/* The external entry point: unix_gc() */
void unix_gc(void)
{
        struct unix_sock *u;
        struct unix_sock *next;
        struct sk_buff_head hitlist;
        struct list_head cursor;
        LIST_HEAD(not_cycle_list);

        spin_lock(&unix_gc_lock);

        /* Avoid a recursive GC. */
        if (gc_in_progress)
                goto out;

        gc_in_progress = true;
        /*
         * First, select candidates for garbage collection.  Only
         * in-flight sockets are considered, and from those only ones
         * which don't have any external reference.
         *
         * Holding unix_gc_lock will protect these candidates from
         * being detached, and hence from gaining an external
         * reference.  Since there are no possible receivers, all
         * buffers currently on the candidates' queues stay there
         * during the garbage collection.
         *
         * We also know that no new candidate can be added onto the
         * receive queues.  Other, non candidate sockets _can_ be
         * added to queue, so we must make sure only to touch
         * candidates.
         */
        list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
                long total_refs;
                long inflight_refs;

                total_refs = file_count(u->sk.sk_socket->file);
                inflight_refs = atomic_long_read(&u->inflight);

                BUG_ON(inflight_refs < 1);
                BUG_ON(total_refs < inflight_refs);
                if (total_refs == inflight_refs) {
                        list_move_tail(&u->link, &gc_candidates);
                        u->gc_candidate = 1;
                        u->gc_maybe_cycle = 1;
                }
        }
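        /*
         * Worked example: a socket whose file has two references, one
         * from an open descriptor and one from an in-flight copy, has
         * total_refs == 2 but inflight_refs == 1, so it is skipped;
         * only sockets reachable exclusively through in-flight
         * references become candidates.
         */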
        /*
         * Now remove all internal in-flight reference to children of
         * the candidates.
         */
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, dec_inflight, NULL);
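        /*
         * At this point each candidate's inflight counter holds only
         * the references that come from outside the candidate set; a
         * remaining count of zero means the socket is referenced by
         * nothing but other candidates.
         */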
        /*
         * Restore the references for children of all candidates,
         * which have remaining references.  Do this recursively, so
         * only those remain, which form cyclic references.
         *
         * Use a "cursor" link, to make the list traversal safe, even
         * though elements might be moved about.
         */
        list_add(&cursor, &gc_candidates);
        while (cursor.next != &gc_candidates) {
                u = list_entry(cursor.next, struct unix_sock, link);

                /* Move cursor to after the current position. */
                list_move(&cursor, &u->link);

                if (atomic_long_read(&u->inflight) > 0) {
                        list_move_tail(&u->link, &not_cycle_list);
                        u->gc_maybe_cycle = 0;
                        scan_children(&u->sk, inc_inflight_move_tail, NULL);
                }
        }
        list_del(&cursor);
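        /*
         * The loop above behaves like a worklist: whenever a socket
         * turns out to be externally referenced,
         * inc_inflight_move_tail() requeues its still-suspect children
         * at the tail of gc_candidates, so reachability propagates
         * until a fixed point is reached.
         */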
        /*
         * not_cycle_list contains those sockets which do not make up a
         * cycle.  Restore these to the inflight list.
         */
        while (!list_empty(&not_cycle_list)) {
                u = list_entry(not_cycle_list.next, struct unix_sock, link);
                u->gc_candidate = 0;
                list_move_tail(&u->link, &gc_inflight_list);
        }
        /*
         * Now gc_candidates contains only garbage.  Restore original
         * inflight counters for these as well, and remove the skbuffs
         * which are creating the cycle(s).
         */
        skb_queue_head_init(&hitlist);
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, inc_inflight, &hitlist);
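        /*
         * The counters are restored before the purge so that the
         * unix_notinflight() calls made while the hitlist is freed see
         * balanced inflight accounting.
         */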
        spin_unlock(&unix_gc_lock);

        /* Here we are. Hitlist is filled. Die. */
        __skb_queue_purge(&hitlist);

        spin_lock(&unix_gc_lock);

        /* All candidates should have been detached by now. */
        BUG_ON(!list_empty(&gc_candidates));
        gc_in_progress = false;
        wake_up(&unix_gc_wait);

 out:
        spin_unlock(&unix_gc_lock);
}