/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <linux/security.h>
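
/* One entry in a per-CPU hash chain: a cached (key, family, dir) ->
 * resolved object mapping, stamped with the generation id it was
 * resolved under.
 */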
struct flow_cache_entry {
	struct flow_cache_entry	*next;
	u16			family;
	u8			dir;
	struct flowi		key;
	u32			genid;
	void			*object;
	atomic_t		*object_ref;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);

static u32 flow_hash_shift;
#define flow_hash_size	(1 << flow_hash_shift)
static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };

#define flow_table(cpu) (per_cpu(flow_tables, cpu))

static struct kmem_cache *flow_cachep __read_mostly;
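
/* Per-CPU entry counts are kept between these watermarks: once a CPU
 * holds more than flow_hwm entries, its chains are trimmed back down
 * towards flow_lwm.
 */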
static int flow_lwm, flow_hwm;

struct flow_percpu_info {
	int hash_rnd_recalc;
	u32 hash_rnd;
	int count;
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };

#define flow_hash_rnd_recalc(cpu) \
	(per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
#define flow_hash_rnd(cpu) \
	(per_cpu(flow_hash_info, cpu).hash_rnd)
#define flow_count(cpu) \
	(per_cpu(flow_hash_info, cpu).count)

static struct timer_list flow_hash_rnd_timer;

#define FLOW_HASH_RND_PERIOD	(10 * 60 * HZ)

struct flow_flush_info {
	atomic_t		cpuleft;
	struct completion	completion;
};
static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };

#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
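
/* Timer callback: flag every CPU to pick a fresh hash seed on its next
 * lookup, then re-arm for another FLOW_HASH_RND_PERIOD.
 */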
static void flow_cache_new_hashrnd(unsigned long arg)
{
	int i;

	for_each_possible_cpu(i)
		flow_hash_rnd_recalc(i) = 1;

	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&flow_hash_rnd_timer);
}
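
/* Drop an entry's reference on its resolved object and free the entry. */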
static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
{
	if (fle->object)
		atomic_dec(fle->object_ref);
	kmem_cache_free(flow_cachep, fle);
	flow_count(cpu)--;
}
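
/* Trim each hash chain on @cpu down to at most @shrink_to entries,
 * killing everything past that point.
 */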
static void __flow_cache_shrink(int cpu, int shrink_to)
{
	struct flow_cache_entry *fle, **flp;
	int i;

	for (i = 0; i < flow_hash_size; i++) {
		int k = 0;

		flp = &flow_table(cpu)[i];
		while ((fle = *flp) != NULL && k < shrink_to) {
			k++;
			flp = &fle->next;
		}
		while ((fle = *flp) != NULL) {
			*flp = fle->next;
			flow_entry_kill(cpu, fle);
		}
	}
}

static void flow_cache_shrink(int cpu)
{
	int shrink_to = flow_lwm / flow_hash_size;

	__flow_cache_shrink(cpu, shrink_to);
}
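
/* Install a new hash seed for @cpu; existing entries would hash to the
 * wrong buckets under it, so drop them all.
 */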
static void flow_new_hash_rnd(int cpu)
{
	get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
	flow_hash_rnd_recalc(cpu) = 0;

	__flow_cache_shrink(cpu, 0);
}
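
/* Hash the 32-bit words of the flowi key into a bucket index for this
 * CPU's table.
 */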
static u32 flow_hash_code(struct flowi *key, int cpu)
{
	u32 *k = (u32 *) key;

	return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
		(flow_hash_size - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif
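
/* flowi_is_missized() has no definition anywhere; if struct flowi is not
 * an exact multiple of flow_compare_t, the call below survives constant
 * folding and the build fails at link time.
 */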
extern void flowi_is_missized(void);

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
	flow_compare_t *k1, *k1_lim, *k2;
	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

	if (sizeof(struct flowi) % sizeof(flow_compare_t))
		flowi_is_missized();

	k1 = (flow_compare_t *) key1;
	k1_lim = k1 + n_elem;

	k2 = (flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}
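
/* Main lookup, run with BHs disabled against the local CPU's table.
 * A hit whose generation id is current returns the cached object with an
 * extra reference; otherwise the entry (newly allocated if necessary) is
 * refilled through the caller-supplied resolver.
 */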
void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
			flow_resolve_t resolver)
{
	struct flow_cache_entry *fle, **head;
	unsigned int hash;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();

	fle = NULL;
	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!flow_table(cpu))
		goto nocache;

	if (flow_hash_rnd_recalc(cpu))
		flow_new_hash_rnd(cpu);
	hash = flow_hash_code(key, cpu);

	head = &flow_table(cpu)[hash];
	for (fle = *head; fle; fle = fle->next) {
		if (fle->family == family &&
		    fle->dir == dir &&
		    flow_key_compare(key, &fle->key) == 0) {
			if (fle->genid == atomic_read(&flow_cache_genid)) {
				void *ret = fle->object;

				if (ret)
					atomic_inc(fle->object_ref);
				local_bh_enable();

				return ret;
			}
			break;
		}
	}

	if (!fle) {
		if (flow_count(cpu) > flow_hwm)
			flow_cache_shrink(cpu);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->next = *head;
			*head = fle;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, sizeof(*key));
			fle->object = NULL;
			flow_count(cpu)++;
		}
	}

nocache:
	{
		int err;
		void *obj;
		atomic_t *obj_ref;

		err = resolver(key, family, dir, &obj, &obj_ref);

		if (fle && !err) {
			fle->genid = atomic_read(&flow_cache_genid);

			if (fle->object)
				atomic_dec(fle->object_ref);

			fle->object = obj;
			fle->object_ref = obj_ref;
			if (obj)
				atomic_inc(fle->object_ref);
		}
		local_bh_enable();

		if (err)
			obj = ERR_PTR(err);
		return obj;
	}
}
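
/* Per-CPU flush work: drop the object reference of every entry whose
 * generation id is stale, then report this CPU as finished.
 */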
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	int i;
	int cpu;

	cpu = smp_processor_id();
	for (i = 0; i < flow_hash_size; i++) {
		struct flow_cache_entry *fle;

		fle = flow_table(cpu)[i];
		for (; fle; fle = fle->next) {
			unsigned genid = atomic_read(&flow_cache_genid);

			if (!fle->object || fle->genid == genid)
				continue;

			fle->object = NULL;
			atomic_dec(fle->object_ref);
		}
	}

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}
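
/* Invoked on the other CPUs via smp_call_function(): point that CPU's
 * flush tasklet at the shared flush info and schedule it.
 */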
static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	int cpu;
	struct tasklet_struct *tasklet;

	cpu = smp_processor_id();

	tasklet = flow_flush_tasklet(cpu);
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}
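
/* Flush stale entries on every online CPU and wait for them all to
 * finish; serialized against itself and against CPU hotplug.
 */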
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	lock_cpu_hotplug();
	mutex_lock(&flow_flush_sem);
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	unlock_cpu_hotplug();
}
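
/* Allocate one CPU's hash table and initialize its per-CPU state and
 * flush tasklet.
 */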
static void __devinit flow_cache_cpu_prepare(int cpu)
{
	struct tasklet_struct *tasklet;
	unsigned long order;

	for (order = 0;
	     (PAGE_SIZE << order) <
		     (sizeof(struct flow_cache_entry *)*flow_hash_size);
	     order++)
		/* NOTHING */;

	flow_table(cpu) = (struct flow_cache_entry **)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (!flow_table(cpu))
		panic("NET: failed to allocate flow cache order %lu\n", order);

	flow_hash_rnd_recalc(cpu) = 1;
	flow_count(cpu) = 0;

	tasklet = flow_flush_tasklet(cpu);
	tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
}
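
/* CPU hotplug callback: empty the table of a CPU that has gone down. */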
static int flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	if (action == CPU_DEAD)
		__flow_cache_shrink((unsigned long)hcpu, 0);
	return NOTIFY_OK;
}
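
/* Boot-time setup: slab cache, table sizing, the periodic rekey timer,
 * per-CPU tables and the hotplug notifier.
 */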
static int __init flow_cache_init(void)
{
	int i;

	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					NULL, NULL);
	flow_hash_shift = 10;
	flow_lwm = 2 * flow_hash_size;
	flow_hwm = 4 * flow_hash_size;

	init_timer(&flow_hash_rnd_timer);
	flow_hash_rnd_timer.function = flow_cache_new_hashrnd;
	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&flow_hash_rnd_timer);

	for_each_possible_cpu(i)
		flow_cache_cpu_prepare(i);

	hotcpu_notifier(flow_cache_cpu, 0);
	return 0;
}

module_init(flow_cache_init);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);