/*
 * net/core/dst.c       Protocol independent destination cache.
 *
 * Authors:             Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>

#include <net/dst.h>
/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to keep the spinlock held only briefly,
 *    we use a second list, holding the long lived entries,
 *    that is handled by the garbage collect task
 *    fired by a workqueue.
 * 3) This second list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 */
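
/*
 * Below is a minimal sketch (illustrative only, hypothetical names,
 * compiled out) of the two-list pattern described above: producers push
 * entries onto an input list under a spinlock, and a work item steals
 * that whole list under the same lock, then scans it at leisure under
 * a mutex.
 */
#if 0
static DEFINE_SPINLOCK(example_input_lock);
static struct dst_entry *example_input_list;    /* fed from BH and non-BH */
static DEFINE_MUTEX(example_scan_mutex);        /* serializes the long scan */

static void example_gc_task(struct work_struct *work)
{
        struct dst_entry *batch;

        /* Hold the spinlock only long enough to steal the whole list. */
        spin_lock_bh(&example_input_lock);
        batch = example_input_list;
        example_input_list = NULL;
        spin_unlock_bh(&example_input_lock);

        /* The slow walk happens under a mutex and may reschedule. */
        mutex_lock(&example_scan_mutex);
        while (batch) {
                struct dst_entry *next = batch->next;

                /* ... examine and possibly free "batch" here ... */
                batch = next;
        }
        mutex_unlock(&example_scan_mutex);
}
#endif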
#if RT_CACHE_DEBUG >= 2
static atomic_t dst_total = ATOMIC_INIT(0);
#endif
/*
 * We want to keep the lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
        spinlock_t              lock;
        struct dst_entry        *list;
        unsigned long           timer_inc;
        unsigned long           timer_expires;
} dst_garbage = {
        .lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
        .timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * Long lived entries are maintained in this list, guarded by dst_gc_mutex.
 */
static struct dst_entry *dst_busy_list;
static void dst_gc_task(struct work_struct *work)
{
        int delayed = 0;
        int work_performed = 0;
        unsigned long expires = ~0L;
        struct dst_entry *dst, *next, head;
        struct dst_entry *last = &head;
#if RT_CACHE_DEBUG >= 2
        ktime_t time_start = ktime_get();
        struct timespec elapsed;
#endif

        mutex_lock(&dst_gc_mutex);
        next = dst_busy_list;

loop:
        while ((dst = next) != NULL) {
                next = dst->next;
                prefetch(&next->next);
                cond_resched();
                if (likely(atomic_read(&dst->__refcnt))) {
                        last->next = dst;
                        last = dst;
                        delayed++;
                        continue;
                }
                work_performed++;

                dst = dst_destroy(dst);
                if (dst) {
                        /* NOHASH and still referenced. Unless it is already
                         * on the gc list, invalidate it and add it to the
                         * gc list.
                         *
                         * Note: this is temporary. Actually, NOHASH dst's
                         * must be obsoleted when their parent is obsoleted.
                         * But we do not have a state "obsoleted, but
                         * referenced by parent", so this is right.
                         */
                        if (dst->obsolete > 1)
                                continue;

                        ___dst_free(dst);
                        dst->next = next;
                        next = dst;
                }
        }

        spin_lock_bh(&dst_garbage.lock);
        next = dst_garbage.list;
        if (next) {
                dst_garbage.list = NULL;
                spin_unlock_bh(&dst_garbage.lock);
                goto loop;
        }
        last->next = NULL;
        dst_busy_list = head.next;
        if (!dst_busy_list)
                dst_garbage.timer_inc = DST_GC_MAX;
        else {
                /*
                 * If we freed less than 1/10 of the delayed entries,
                 * we can sleep longer.
                 */
                if (work_performed <= delayed/10) {
                        dst_garbage.timer_expires += dst_garbage.timer_inc;
                        if (dst_garbage.timer_expires > DST_GC_MAX)
                                dst_garbage.timer_expires = DST_GC_MAX;
                        dst_garbage.timer_inc += DST_GC_INC;
                } else {
                        dst_garbage.timer_inc = DST_GC_INC;
                        dst_garbage.timer_expires = DST_GC_MIN;
                }
                expires = dst_garbage.timer_expires;
                /*
                 * If the next desired timer is more than 4 seconds in the
                 * future then round the timer to whole seconds.
                 */
                if (expires > 4*HZ)
                        expires = round_jiffies_relative(expires);
                schedule_delayed_work(&dst_gc_work, expires);
        }

        spin_unlock_bh(&dst_garbage.lock);
        mutex_unlock(&dst_gc_mutex);
#if RT_CACHE_DEBUG >= 2
        elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
        printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
               " expires: %lu elapsed: %lu us\n",
               atomic_read(&dst_total), delayed, work_performed,
               expires,
               elapsed.tv_sec * USEC_PER_SEC +
               elapsed.tv_nsec / NSEC_PER_USEC);
#endif
}
int dst_discard(struct sk_buff *skb)
{
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL(dst_discard);
void *dst_alloc(struct dst_ops *ops)
{
        struct dst_entry *dst;

        if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
                if (ops->gc(ops))
                        return NULL;
        }
        dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
        if (!dst)
                return NULL;
        atomic_set(&dst->__refcnt, 0);
        dst->ops = ops;
        dst->lastuse = jiffies;
        dst->path = dst;
        dst->input = dst->output = dst_discard;
#if RT_CACHE_DEBUG >= 2
        atomic_inc(&dst_total);
#endif
        dst_entries_add(ops, 1);
        return dst;
}
EXPORT_SYMBOL(dst_alloc);
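
/*
 * Illustrative sketch (hypothetical names, compiled out): a protocol
 * allocates its entries through dst_alloc() with its own struct dst_ops,
 * whose gc_thresh and optional ->gc() hook drive the check above.
 */
#if 0
static struct dst_ops example_dst_ops = {
        .family         = AF_INET,
        .gc_thresh      = 1024,
        /* .kmem_cachep must be created by the protocol's init code */
};

static struct dst_entry *example_route_alloc(void)
{
        struct dst_entry *dst = dst_alloc(&example_dst_ops);

        if (!dst)
                return NULL;    /* over threshold and ->gc() did not help */
        /* The caller now fills in protocol specific fields. */
        return dst;
}
#endif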
static void ___dst_free(struct dst_entry *dst)
{
        /* The first case (dev == NULL) is required when
         * a protocol module is unloaded.
         */
        if (dst->dev == NULL || !(dst->dev->flags & IFF_UP))
                dst->input = dst->output = dst_discard;
        dst->obsolete = 2;
}
void __dst_free(struct dst_entry *dst)
{
        spin_lock_bh(&dst_garbage.lock);
        ___dst_free(dst);
        dst->next = dst_garbage.list;
        dst_garbage.list = dst;
        if (dst_garbage.timer_inc > DST_GC_INC) {
                dst_garbage.timer_inc = DST_GC_INC;
                dst_garbage.timer_expires = DST_GC_MIN;
                cancel_delayed_work(&dst_gc_work);
                schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
        }
        spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);
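
/*
 * Illustrative sketch (hypothetical name, compiled out): a protocol calls
 * __dst_free() for an entry it has dropped from its own lookup structures;
 * the entry then waits on dst_garbage.list until dst_gc_task() can destroy
 * it, once its refcount reaches zero.
 */
#if 0
static void example_invalidate(struct dst_entry *dst)
{
        /* The entry is already unhashed; let the gc reap it safely. */
        __dst_free(dst);
}
#endif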
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
        struct dst_entry *child;
        struct neighbour *neigh;
        struct hh_cache *hh;

        smp_rmb();

again:
        neigh = dst->neighbour;
        hh = dst->hh;
        child = dst->child;

        dst->hh = NULL;
        if (hh)
                hh_cache_put(hh);

        if (neigh) {
                dst->neighbour = NULL;
                neigh_release(neigh);
        }

        dst_entries_add(dst->ops, -1);

        if (dst->ops->destroy)
                dst->ops->destroy(dst);
        if (dst->dev)
                dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
        atomic_dec(&dst_total);
#endif
        kmem_cache_free(dst->ops->kmem_cachep, dst);

        dst = child;
        if (dst) {
                int nohash = dst->flags & DST_NOHASH;

                if (atomic_dec_and_test(&dst->__refcnt)) {
                        /* We were the real parent of this dst, so kill
                         * the child too. */
                        if (nohash)
                                goto again;
                } else {
                        /* Child is still referenced, return it for freeing. */
                        if (nohash)
                                return dst;
                        /* Child is still in its hash table */
                }
        }
        return NULL;
}
EXPORT_SYMBOL(dst_destroy);
void dst_release(struct dst_entry *dst)
{
        if (dst) {
                int newrefcnt;

                newrefcnt = atomic_dec_return(&dst->__refcnt);
                WARN_ON(newrefcnt < 0);
                if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
                        dst = dst_destroy(dst);
                        if (dst)
                                __dst_free(dst);
                }
        }
}
EXPORT_SYMBOL(dst_release);
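
/*
 * Illustrative sketch (hypothetical name, compiled out): callers pair
 * dst_hold() with dst_release(). For a DST_NOCACHE entry, the final
 * dst_release() destroys it here instead of leaving that to a cache scan.
 */
#if 0
static void example_use_route(struct dst_entry *dst)
{
        dst_hold(dst);          /* take our own reference */
        /* ... use dst, e.g. transmit via dst->output() ... */
        dst_release(dst);       /* may free a DST_NOCACHE entry */
}
#endif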
/**
 * skb_dst_set_noref - sets skb dst, without taking a reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst;
 * skb_dst_drop() should not dst_release() this dst.
 */
void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
        WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
        /* If dst is not in the cache, we must take a reference, because
         * dst_release() will destroy dst as soon as its refcount becomes
         * zero.
         */
        if (unlikely(dst->flags & DST_NOCACHE)) {
                dst_hold(dst);
                skb_dst_set(skb, dst);
        } else {
                skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
        }
}
EXPORT_SYMBOL(skb_dst_set_noref);
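
/*
 * Illustrative sketch (hypothetical name, compiled out): skb_dst_set_noref()
 * is only legal inside an rcu_read_lock() section, since no reference is
 * taken on a cached dst; the skb must not outlive that section unless it
 * ends up holding a real reference.
 */
#if 0
static void example_attach_dst(struct sk_buff *skb, struct dst_entry *cached)
{
        rcu_read_lock();
        skb_dst_set_noref(skb, cached); /* skb_dst_drop() won't release it */
        /* ... process and consume skb before leaving the section ... */
        rcu_read_unlock();
}
#endif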
/* Dirty hack. We did it in 2.2 (in __dst_free), we have _very_ good
 * reasons not to repeat this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_ _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                       int unregister)
{
        if (dst->ops->ifdown)
                dst->ops->ifdown(dst, dev, unregister);

        if (dev != dst->dev)
                return;

        if (!unregister) {
                dst->input = dst->output = dst_discard;
        } else {
                dst->dev = dev_net(dst->dev)->loopback_dev;
                dev_hold(dst->dev);
                dev_put(dev);
                if (dst->neighbour && dst->neighbour->dev == dev) {
                        dst->neighbour->dev = dst->dev;
                        dev_hold(dst->dev);
                        dev_put(dev);
                }
        }
}
static int dst_dev_event(struct notifier_block *this, unsigned long event,
                         void *ptr)
{
        struct net_device *dev = ptr;
        struct dst_entry *dst, *last = NULL;

        switch (event) {
        case NETDEV_UNREGISTER:
        case NETDEV_DOWN:
                mutex_lock(&dst_gc_mutex);
                for (dst = dst_busy_list; dst; dst = dst->next) {
                        last = dst;
                        dst_ifdown(dst, dev, event != NETDEV_DOWN);
                }

                spin_lock_bh(&dst_garbage.lock);
                dst = dst_garbage.list;
                dst_garbage.list = NULL;
                spin_unlock_bh(&dst_garbage.lock);

                if (last)
                        last->next = dst;
                else
                        dst_busy_list = dst;
                for (; dst; dst = dst->next)
                        dst_ifdown(dst, dev, event != NETDEV_DOWN);
                mutex_unlock(&dst_gc_mutex);
                break;
        }
        return NOTIFY_DONE;
}
static struct notifier_block dst_dev_notifier = {
        .notifier_call = dst_dev_event,
        .priority = -10, /* must be called after other network notifiers */
};

void __init dst_init(void)
{
        register_netdevice_notifier(&dst_dev_notifier);
}