/*
 * net/dst.c	Protocol independent destination cache.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>

#include <net/dst.h>
/* Locking strategy:
 * 1) Garbage collection state of dead destination cache
 *    entries is protected by dst_lock.
 * 2) GC is run only from BH context, and is the only remover
 *    of entries.
 * 3) Entries are added to the garbage list from both BH
 *    and non-BH context, so local BH disabling is needed.
 * 4) All operations modify state, so a spinlock is used.
 */
static struct dst_entry *dst_garbage_list;
static atomic_t dst_total = ATOMIC_INIT(0);
static spinlock_t dst_lock = SPIN_LOCK_UNLOCKED;

static unsigned long dst_gc_timer_expires;
static unsigned long dst_gc_timer_inc = DST_GC_MAX;
static void dst_run_gc(unsigned long);

static struct timer_list dst_gc_timer =
	{ NULL, NULL, DST_GC_MIN, 0L, dst_run_gc };
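
/*
 * Garbage collector, run from dst_gc_timer.  It walks the garbage list,
 * destroys entries whose use count has dropped to zero, and re-arms the
 * timer with a growing interval while busy entries remain.
 */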
static void dst_run_gc(unsigned long dummy)
{
	int delayed = 0;
	struct dst_entry *dst, **dstp;

	if (!spin_trylock(&dst_lock)) {
		mod_timer(&dst_gc_timer, jiffies + HZ/10);
		return;
	}

	del_timer(&dst_gc_timer);
	dstp = &dst_garbage_list;
	while ((dst = *dstp) != NULL) {
		if (atomic_read(&dst->use)) {
			dstp = &dst->next;
			delayed++;
			continue;
		}
		*dstp = dst->next;
		dst_destroy(dst);
	}
	if (!dst_garbage_list) {
		dst_gc_timer_inc = DST_GC_MAX;
		goto out;
	}
	if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
		dst_gc_timer_expires = DST_GC_MAX;
	dst_gc_timer_inc += DST_GC_INC;
	dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
#if RT_CACHE_DEBUG >= 2
	printk("dst_total: %d/%d %ld\n",
	       atomic_read(&dst_total), delayed, dst_gc_timer_expires);
#endif
	add_timer(&dst_gc_timer);

out:
	spin_unlock(&dst_lock);
}
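
/*
 * Default input/output handlers installed by dst_alloc() and restored
 * for dead entries: both simply drop the packet.
 */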
static int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static int dst_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
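
/*
 * Allocate and initialise the protocol-independent part of a destination
 * cache entry.  If the owning protocol is over its gc_thresh, its gc()
 * hook is given a chance to reclaim entries first; the allocation is
 * refused when gc() returns nonzero.
 *
 * Typical use (illustrative sketch only, not part of this file): a
 * protocol embeds a dst_entry at the start of its own route structure
 * and passes its dst_ops, e.g.
 *
 *	struct rtable *rt = dst_alloc(sizeof(struct rtable), &my_dst_ops);
 *	if (rt == NULL)
 *		return NULL;
 *
 * where "my_dst_ops" stands for the protocol's own struct dst_ops.
 */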
void * dst_alloc(int size, struct dst_ops * ops)
{
	struct dst_entry * dst;

	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
		if (ops->gc())
			return NULL;
	}
	dst = kmalloc(size, GFP_ATOMIC);
	if (!dst)
		return NULL;
	memset(dst, 0, size);
	dst->ops = ops;
	atomic_set(&dst->refcnt, 0);
	dst->lastuse = jiffies;
	dst->input = dst_discard;
	dst->output = dst_blackhole;
	atomic_inc(&dst_total);
	atomic_inc(&ops->entries);
	return dst;
}
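
/*
 * Queue an entry for destruction: mark it obsolete, chain it onto
 * dst_garbage_list and leave it to dst_run_gc().  If the entry's device
 * is gone or down, its handlers are pointed at the drop routines and its
 * device at loopback_dev.  Callable from both BH and non-BH context,
 * hence spin_lock_bh().
 */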
void __dst_free(struct dst_entry * dst)
{
	spin_lock_bh(&dst_lock);

	/* The first case (dev == NULL) is required when a
	   protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
		dst->input = dst_discard;
		dst->output = dst_blackhole;
		dst->dev = &loopback_dev;
	}
	dst->obsolete = 2;
	dst->next = dst_garbage_list;
	dst_garbage_list = dst;
	if (dst_gc_timer_inc > DST_GC_INC) {
		del_timer(&dst_gc_timer);
		dst_gc_timer_inc = DST_GC_INC;
		dst_gc_timer_expires = DST_GC_MIN;
		dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
		add_timer(&dst_gc_timer);
	}

	spin_unlock_bh(&dst_lock);
}
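
/*
 * Final teardown of an entry: drop the cached hardware header and
 * neighbour reference, let the owning protocol clean up via its
 * destroy() hook, then free the memory.
 */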
void dst_destroy(struct dst_entry * dst)
{
	struct neighbour *neigh = dst->neighbour;
	struct hh_cache *hh = dst->hh;

	dst->hh = NULL;
	if (hh && atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);

	if (neigh) {
		dst->neighbour = NULL;
		neigh_release(neigh);
	}

	atomic_dec(&dst->ops->entries);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	atomic_dec(&dst_total);
	kfree(dst);
}
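
/*
 * Netdevice notifier: when a device goes down or is unregistered,
 * redirect any garbage-list entries still pointing at it to the
 * loopback device so late packets are silently dropped.
 */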
static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct device *dev = ptr;
	struct dst_entry *dst;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		spin_lock_bh(&dst_lock);
		for (dst = dst_garbage_list; dst; dst = dst->next) {
			if (dst->dev == dev) {
				dst->input = dst_discard;
				dst->output = dst_blackhole;
				dst->dev = &loopback_dev;
			}
		}
		spin_unlock_bh(&dst_lock);
		break;
	}
	return NOTIFY_DONE;
}
struct notifier_block dst_dev_notifier = {
	dst_dev_event,
	NULL,
	0
};
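
/*
 * Called once at init time to hook the notifier above into the
 * netdevice notifier chain.
 */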
__initfunc(void dst_init(void))
{
	register_netdevice_notifier(&dst_dev_notifier);
}