/*
 * net/dst.c	Protocol independent destination cache.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>

#include <net/dst.h>
/*
 * Locking strategy:
 * 1) Garbage collection state of dead destination cache
 *    entries is protected by dst_lock.
 * 2) GC is run only from BH context, and is the only remover
 *    of entries.
 * 3) Entries are added to the garbage list from both BH
 *    and non-BH context, so local BH disabling is needed.
 * 4) All operations modify state, so a spinlock is used.
 */
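/* A sketch of rules 3) and 4) in practice: a non-BH caller queueing a
 * dead entry must disable BHs around the list update, exactly as
 * __dst_free() below does:
 *
 *	spin_lock_bh(&dst_lock);
 *	dst->next = dst_garbage_list;
 *	dst_garbage_list = dst;
 *	spin_unlock_bh(&dst_lock);
 */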
static struct dst_entry *dst_garbage_list;
static atomic_t dst_total = ATOMIC_INIT(0);
static spinlock_t dst_lock = SPIN_LOCK_UNLOCKED;

static unsigned long dst_gc_timer_expires;
static unsigned long dst_gc_timer_inc = DST_GC_MAX;
static void dst_run_gc(unsigned long);
static struct timer_list dst_gc_timer =
	{ NULL, NULL, DST_GC_MIN, 0L, dst_run_gc };
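/* GC backs off while garbage lingers: each pass pushes the next expiry
 * out by dst_gc_timer_inc, which itself grows by DST_GC_INC per pass,
 * so successive intervals run roughly
 *
 *	DST_GC_MIN, DST_GC_MIN + DST_GC_INC, DST_GC_MIN + 3*DST_GC_INC, ...
 *
 * until capped at DST_GC_MAX.  __dst_free() resets the schedule back to
 * DST_GC_MIN whenever fresh garbage arrives.
 */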
static void dst_run_gc(unsigned long dummy)
{
	int delayed = 0;
	struct dst_entry *dst, **dstp;

	if (!spin_trylock(&dst_lock)) {
		/* Lock is busy; retry shortly instead of spinning in BH. */
		mod_timer(&dst_gc_timer, jiffies + HZ/10);
		return;
	}

	del_timer(&dst_gc_timer);
	dstp = &dst_garbage_list;
	while ((dst = *dstp) != NULL) {
		if (atomic_read(&dst->use)) {
			/* Still referenced: leave it for a later pass. */
			dstp = &dst->next;
			delayed++;
			continue;
		}
		*dstp = dst->next;
		dst_destroy(dst);
	}
	if (!dst_garbage_list) {
		dst_gc_timer_inc = DST_GC_MAX;
		goto out;
	}
	if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
		dst_gc_timer_expires = DST_GC_MAX;
	dst_gc_timer_inc += DST_GC_INC;
	dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
#if RT_CACHE_DEBUG >= 2
	printk("dst_total: %d/%d %ld\n",
	       atomic_read(&dst_total), delayed, dst_gc_timer_expires);
#endif
	add_timer(&dst_gc_timer);

out:
	spin_unlock(&dst_lock);
}
static int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
static int dst_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
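/* Both stubs simply eat the packet: a freshly allocated or dying entry
 * must never pass traffic.  The owning protocol is expected to replace
 * dst->input/dst->output with its real handlers after dst_alloc()
 * returns; the stubs are re-installed below when an entry's device
 * goes away. */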
void * dst_alloc(int size, struct dst_ops * ops)
{
	struct dst_entry * dst;

	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
		/* Over the protocol's GC threshold: ask it to shrink
		   first, and fail the allocation if it cannot. */
		if (ops->gc())
			return NULL;
	}
	dst = kmalloc(size, GFP_ATOMIC);
	if (!dst)
		return NULL;
	memset(dst, 0, size);
	dst->ops = ops;
	atomic_set(&dst->refcnt, 0);
	dst->lastuse = jiffies;
	dst->input = dst_discard;
	dst->output = dst_blackhole;
	atomic_inc(&dst_total);
	atomic_inc(&ops->entries);
	return dst;
}
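/* Typical usage, as a sketch only ("struct my_route" and "my_dst_ops"
 * are illustrative names, not part of this file): a protocol embeds a
 * dst_entry as the first member of its own route type and
 * over-allocates through this helper:
 *
 *	struct my_route *rt;
 *
 *	rt = dst_alloc(sizeof(struct my_route), &my_dst_ops);
 *	if (rt == NULL)
 *		return NULL;
 */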
void __dst_free(struct dst_entry * dst)
{
	spin_lock_bh(&dst_lock);

	/* The first case (dev==NULL) is required, when
	   protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
		/* Disconnect the entry from its gone or downed device,
		   so late traffic is dropped safely. */
		dst->input = dst_discard;
		dst->output = dst_blackhole;
		dst->dev = &loopback_dev;
	}
	dst->next = dst_garbage_list;
	dst_garbage_list = dst;
	if (dst_gc_timer_inc > DST_GC_INC) {
		/* GC had backed off; rearm it at the minimum interval
		   now that there is fresh garbage. */
		del_timer(&dst_gc_timer);
		dst_gc_timer_inc = DST_GC_INC;
		dst_gc_timer_expires = DST_GC_MIN;
		dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
		add_timer(&dst_gc_timer);
	}

	spin_unlock_bh(&dst_lock);
}
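/* Note that __dst_free() only queues the entry; the memory is not
 * reclaimed until dst_run_gc() sees its use count drop to zero and
 * calls dst_destroy().  Callers normally reach this through the
 * dst_free() wrapper in net/dst.h rather than calling it directly. */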
void dst_destroy(struct dst_entry * dst)
{
	struct neighbour *neigh = dst->neighbour;
	struct hh_cache *hh = dst->hh;

	dst->hh = NULL;
	if (hh && atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);

	if (neigh) {
		dst->neighbour = NULL;
		neigh_release(neigh);
	}

	atomic_dec(&dst->ops->entries);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	atomic_dec(&dst_total);
	kfree(dst);
}
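/* The reference discipline assumed throughout: users take and drop
 * references with the dst_hold()/dst_release() helpers from net/dst.h
 * (an atomic inc/dec of dst->use), and only an entry whose use count
 * has fallen to zero is handed to dst_destroy() by the GC above. */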
static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct device *dev = ptr;
	struct dst_entry *dst;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		spin_lock_bh(&dst_lock);
		for (dst = dst_garbage_list; dst; dst = dst->next) {
			if (dst->dev == dev) {
				/* Retarget dead entries at loopback so
				   queued packets are dropped without
				   touching the vanishing device. */
				dst->input = dst_discard;
				dst->output = dst_blackhole;
				dst->dev = &loopback_dev;
			}
		}
		spin_unlock_bh(&dst_lock);
		break;
	}
	return NOTIFY_DONE;
}
struct notifier_block dst_dev_notifier = {
	dst_dev_event,
	NULL,
	0
};
__initfunc(void dst_init(void))
{
	register_netdevice_notifier(&dst_dev_notifier);
}