/*
 *	net/core/dst.c	Protocol independent destination cache.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>

#include <net/dst.h>
/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to hold the spinlock for only a short time,
 *    we use a second list where long lived entries are stored,
 *    handled by the garbage collect task fired by a workqueue.
 * 3) This list is guarded by a mutex,
 *    so that dst_gc_task() and dst_dev_event() can be synchronized.
 */
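/*
 * Lifecycle sketch (derived from the code below, not an additional API):
 * a protocol obtains an entry from dst_alloc() and drops plain references
 * with dst_release(); an entry that must die while references may still
 * be in flight goes through __dst_free(), which queues it on
 * dst_garbage.list. dst_gc_task() periodically splices that list onto
 * dst_busy_list and destroys whatever has reached refcount zero.
 */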
#if RT_CACHE_DEBUG >= 2
static atomic_t dst_total = ATOMIC_INIT(0);
#endif
/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * Long lived entries are maintained in this list, guarded by dst_gc_mutex.
 */
static struct dst_entry *dst_busy_list;
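/*
 * Walk dst_busy_list, destroy entries whose refcount has dropped to zero,
 * keep the rest for a later pass, then re-arm the delayed work with an
 * adaptive period (see the DST_GC_MIN/DST_GC_INC/DST_GC_MAX handling
 * below).
 */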
static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;
#if RT_CACHE_DEBUG >= 2
	ktime_t time_start = ktime_get();
	struct timespec elapsed;
#endif

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 1)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}
	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * If we freed less than 1/10 of the delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
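		/*
		 * An unproductive pass thus backs the period off roughly
		 * quadratically (timer_inc itself grows by DST_GC_INC each
		 * time), capped at DST_GC_MAX, while a productive pass snaps
		 * it back to DST_GC_MIN. Assuming the usual definitions in
		 * net/dst.h (DST_GC_MIN = HZ/10, DST_GC_INC = HZ/2,
		 * DST_GC_MAX = 120*HZ), an idle list is revisited after
		 * 0.1s, 0.6s, 1.6s, 3.1s, ... up to every two minutes.
		 */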
		/*
		 * If the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds.
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
#if RT_CACHE_DEBUG >= 2
	elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
	printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
		" expires: %lu elapsed: %lu us\n",
		atomic_read(&dst_total), delayed, work_performed,
		expires,
		elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC);
#endif
}
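/*
 * Fallback input/output handler installed on dead or not-yet-live
 * entries: silently drop the packet.
 */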
int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard);
void *dst_alloc(struct dst_ops *ops)
{
	struct dst_entry *dst;

	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	atomic_set(&dst->__refcnt, 0);
	dst->ops = ops;
	dst->lastuse = jiffies;
	dst->path = dst;
	dst->input = dst->output = dst_discard;
#if RT_CACHE_DEBUG >= 2
	atomic_inc(&dst_total);
#endif
	atomic_inc(&ops->entries);
	return dst;
}
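/*
 * A sketch of typical protocol usage (names here are hypothetical):
 *
 *	struct my_route *rt = dst_alloc(&my_dst_ops);
 *
 * where my_dst_ops supplies kmem_cachep (sized for the protocol's entry
 * that embeds struct dst_entry), gc_thresh, and the gc/destroy/ifdown
 * callbacks invoked elsewhere in this file.
 */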
static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when a
	 * protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP)) {
		dst->input = dst->output = dst_discard;
	}
	dst->obsolete = 2;
}
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		cancel_delayed_work(&dst_gc_work);
		schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
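/*
 * Note the re-arming above: queueing a new entry while the gc period has
 * backed off (timer_inc > DST_GC_INC) pulls the next pass forward to
 * DST_GC_MIN, so a freshly freed entry never waits for the full
 * DST_GC_MAX back-off.
 */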
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;
	struct neighbour *neigh;
	struct hh_cache *hh;

	smp_rmb();

again:
	neigh = dst->neighbour;
	hh = dst->hh;
	child = dst->child;

	dst->hh = NULL;
	if (hh && atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);

	if (neigh) {
		dst->neighbour = NULL;
		neigh_release(neigh);
	}

	atomic_dec(&dst->ops->entries);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
	atomic_dec(&dst_total);
#endif
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
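/*
 * Summary of the child handling above: a zero-refcount DST_NOHASH child
 * is destroyed in the same pass (goto again), a still-referenced NOHASH
 * child is handed back to the caller for deferred freeing, and a hashed
 * child is left to its own table's garbage collection.
 */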
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		smp_mb__before_atomic_dec();
		newrefcnt = atomic_dec_return(&dst->__refcnt);
		WARN_ON(newrefcnt < 0);
	}
}
EXPORT_SYMBOL(dst_release);
/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			      int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst->output = dst_discard;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
		if (dst->neighbour && dst->neighbour->dev == dev) {
			dst->neighbour->dev = dst->dev;
			dev_hold(dst->dev);
			dev_put(dev);
		}
	}
}
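/*
 * On NETDEV_UNREGISTER the entry (and any neighbour bound to the dying
 * device) is rebound to the namespace's loopback device, with device
 * refcounts moved accordingly, so the dst may safely outlive the device.
 * On NETDEV_DOWN the entry merely starts discarding packets.
 */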
static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next) {
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}
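/*
 * Both lists must be walked: entries sitting on dst_garbage.list have not
 * yet been spliced onto dst_busy_list by dst_gc_task(), and the splice
 * performed here keeps them visible to subsequent notifier events.
 */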
static struct notifier_block dst_dev_notifier = {
	.notifier_call	= dst_dev_event,
};
void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}
EXPORT_SYMBOL(__dst_free);
EXPORT_SYMBOL(dst_alloc);
EXPORT_SYMBOL(dst_destroy);