/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <linux/security.h>
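
/* A single cached flow.  An entry sits on its per-cpu hash chain while
 * live; once unlinked it moves to a gc list to be freed later, hence
 * the union. */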
struct flow_cache_entry {
        union {
                struct hlist_node       hlist;
                struct list_head        gc_list;
        } u;
        u16                             family;
        u8                              dir;
        u32                             genid;
        struct flowi                    key;
        struct flow_cache_object        *object;
};
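
/* Per-cpu slice of the cache: a private hash table, entry count and
 * hash seed, plus the tasklet used to flush this cpu's table. */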
struct flow_cache_percpu {
        struct hlist_head               *hash_table;
        int                             hash_count;
        u32                             hash_rnd;
        int                             hash_rnd_recalc;
        struct tasklet_struct           flush_tasklet;
};
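
/* Tracks one global flush: cpuleft counts cpus that still have to run
 * their flush tasklet before the waiter is completed. */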
struct flow_flush_info {
        struct flow_cache               *cache;
        atomic_t                        cpuleft;
        struct completion               completion;
};
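
/* The cache proper.  Table sizing is fixed at init time; the low/high
 * watermarks drive shrinking of the per-cpu tables. */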
struct flow_cache {
        u32                             hash_shift;
        unsigned long                   order;
        struct flow_cache_percpu        *percpu;
        struct notifier_block           hotcpu_notifier;
        int                             low_watermark;
        int                             high_watermark;
        struct timer_list               rnd_timer;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep;

static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);

#define flow_cache_hash_size(cache)     (1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD            (10 * 60 * HZ)
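
/* Timer callback: ask every cpu to pick a fresh hash seed on its next
 * lookup, then re-arm the timer. */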
static void flow_cache_new_hashrnd(unsigned long arg)
{
        struct flow_cache *fc = (void *) arg;
        int i;

        for_each_possible_cpu(i)
                per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);
}
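
/* An entry is stale if the global generation count has moved on, or if
 * its cached object no longer passes its own ->check(). */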
static int flow_entry_valid(struct flow_cache_entry *fle)
{
        if (atomic_read(&flow_cache_genid) != fle->genid)
                return 0;
        if (fle->object && !fle->object->ops->check(fle->object))
                return 0;
        return 1;
}
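
/* Release an entry's cached object, if any, and free the entry. */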
static void flow_entry_kill(struct flow_cache_entry *fle)
{
        if (fle->object)
                fle->object->ops->delete(fle->object);
        kmem_cache_free(flow_cachep, fle);
}
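
/* Work item: grab everything queued on the global gc list and kill it.
 * This runs in process context, away from the bh-disabled fast path. */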
static void flow_cache_gc_task(struct work_struct *work)
{
        struct list_head gc_list;
        struct flow_cache_entry *fce, *n;

        INIT_LIST_HEAD(&gc_list);
        spin_lock_bh(&flow_cache_gc_lock);
        list_splice_tail_init(&flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&flow_cache_gc_lock);

        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
                flow_entry_kill(fce);
}
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);
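
/* Move a batch of unlinked entries onto the global gc list and kick
 * the gc work item.  Called with bh disabled on the owning cpu. */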
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     int deleted, struct list_head *gc_list)
{
        if (deleted) {
                fcp->hash_count -= deleted;
                spin_lock_bh(&flow_cache_gc_lock);
                list_splice_tail(gc_list, &flow_cache_gc_list);
                spin_unlock_bh(&flow_cache_gc_lock);
                schedule_work(&flow_cache_gc_work);
        }
}
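
/* Walk one cpu's table and trim each chain down to shrink_to valid
 * entries; everything else is unlinked and queued for gc.  A shrink_to
 * of zero empties the table. */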
static void __flow_cache_shrink(struct flow_cache *fc,
                                struct flow_cache_percpu *fcp,
                                int shrink_to)
{
        struct flow_cache_entry *fle;
        struct hlist_node *entry, *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;

        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                int saved = 0;

                hlist_for_each_entry_safe(fle, entry, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (saved < shrink_to &&
                            flow_entry_valid(fle)) {
                                saved++;
                        } else {
                                deleted++;
                                hlist_del(&fle->u.hlist);
                                list_add_tail(&fle->u.gc_list, &gc_list);
                        }
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list);
}
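
/* Shrink back to the low watermark, spread evenly across chains. */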
static void flow_cache_shrink(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

        __flow_cache_shrink(fc, fcp, shrink_to);
}
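
/* Pick a new per-cpu hash seed.  The old table contents would hash to
 * the wrong buckets afterwards, so the table is emptied first. */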
static void flow_new_hash_rnd(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        get_random_bytes(&fcp->hash_rnd, sizeof(u32));
        fcp->hash_rnd_recalc = 0;
        __flow_cache_shrink(fc, fcp, 0);
}
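
/* Hash a flow key into this cpu's table using its private seed. */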
static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
                          struct flowi *key)
{
        u32 *k = (u32 *) key;

        return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
        flow_compare_t *k1, *k1_lim, *k2;
        const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

        BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));

        k1 = (flow_compare_t *) key1;
        k1_lim = k1 + n_elem;

        k2 = (flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}
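
/* Look up a flow in this cpu's hash table.  On a miss, or on a stale
 * hit, the resolver is invoked and its result cached.  Disables bh for
 * the duration; returns the cache object, an ERR_PTR from the
 * resolver, or NULL. */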
struct flow_cache_object *
flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
                  flow_resolve_t resolver, void *ctx)
{
        struct flow_cache *fc = &flow_cache_global;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
        struct hlist_node *entry;
        struct flow_cache_object *flo;
        unsigned int hash;

        local_bh_disable();
        fcp = per_cpu_ptr(fc->percpu, smp_processor_id());

        fle = NULL;
        flo = NULL;
        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!fcp->hash_table)
                goto nocache;

        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);

        hash = flow_hash_code(fc, fcp, key);
        hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
                if (tfle->family == family &&
                    tfle->dir == dir &&
                    flow_key_compare(key, &tfle->key) == 0) {
                        fle = tfle;
                        break;
                }
        }

        if (unlikely(!fle)) {
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);

                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, sizeof(*key));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
                }
        } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
                /* Fresh hit: hand out a reference to the cached object. */
                flo = fle->object;
                if (!flo)
                        goto ret_object;
                flo = flo->ops->get(flo);
                if (flo)
                        goto ret_object;
        } else if (fle->object) {
                /* Stale hit: drop the old object and re-resolve below. */
                flo = fle->object;
                flo->ops->delete(flo);
                fle->object = NULL;
        }

nocache:
        /* Miss, stale entry, or cache disabled: have the resolver
         * produce the object, handing it the old object, if any. */
        flo = NULL;
        if (fle) {
                flo = fle->object;
                fle->object = NULL;
        }
        flo = resolver(net, key, family, dir, flo, ctx);
        if (fle) {
                fle->genid = atomic_read(&flow_cache_genid);
                if (!IS_ERR(flo))
                        fle->object = flo;
                else
                        fle->genid--;   /* stay stale, retry next lookup */
        } else {
                if (flo && !IS_ERR(flo))
                        flo->ops->delete(flo);
        }
ret_object:
        local_bh_enable();
        return flo;
}
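
/* Per-cpu flush: drop every invalid entry from the local table, then
 * signal completion if this was the last cpu. */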
static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        struct flow_cache *fc = info->cache;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle;
        struct hlist_node *entry, *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;

        fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                hlist_for_each_entry_safe(fle, entry, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (flow_entry_valid(fle))
                                continue;

                        deleted++;
                        hlist_del(&fle->u.hlist);
                        list_add_tail(&fle->u.gc_list, &gc_list);
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list);

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}
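
/* smp_call_function callback: point the local flush tasklet at the
 * flush info and schedule it. */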
static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        int cpu;
        struct tasklet_struct *tasklet;

        cpu = smp_processor_id();
        tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}
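
/* Flush stale entries on all online cpus: the other cpus are kicked
 * via smp_call_function() while the caller runs the tasklet body
 * itself, then everyone is waited for.  Serialized by a local mutex,
 * with cpu hotplug held off for the duration. */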
void flow_cache_flush(void)
{
        struct flow_flush_info info;
        static DEFINE_MUTEX(flow_flush_sem);

        /* Don't want cpus going down or up during this. */
        get_online_cpus();
        mutex_lock(&flow_flush_sem);
        info.cache = &flow_cache_global;
        atomic_set(&info.cpuleft, num_online_cpus());
        init_completion(&info.completion);

        local_bh_disable();
        smp_call_function(flow_cache_flush_per_cpu, &info, 0);
        flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);
        mutex_unlock(&flow_flush_sem);
        put_online_cpus();
}
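
/* Boot-time setup of one cpu's slice: allocate its hash table (panic
 * on failure) and init its flush tasklet. */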
static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
                                          struct flow_cache_percpu *fcp)
{
        fcp->hash_table = (struct hlist_head *)
                __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
        if (!fcp->hash_table)
                panic("NET: failed to allocate flow cache order %lu\n", fc->order);

        fcp->hash_rnd_recalc = 1;
        fcp->hash_count = 0;
        tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
}
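
/* Hotplug notifier: when a cpu dies, empty its table so nothing is
 * left pinned there. */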
static int flow_cache_cpu(struct notifier_block *nfb,
                          unsigned long action,
                          void *hcpu)
{
        struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
        int cpu = (unsigned long) hcpu;
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                __flow_cache_shrink(fc, fcp, 0);
        return NOTIFY_OK;
}
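
/* Set up the cache: fixed 1024-bucket per-cpu tables (hash_shift 10),
 * watermarks derived from the table size, the seed-rotation timer, and
 * the hotplug notifier. */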
static int flow_cache_init(struct flow_cache *fc)
{
        unsigned long order;
        int i;

        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
        fc->high_watermark = 4 * flow_cache_hash_size(fc);

        for (order = 0;
             (PAGE_SIZE << order) <
                     (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
             order++)
                /* NOTHING */;
        fc->order = order;
        fc->percpu = alloc_percpu(struct flow_cache_percpu);

        setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
                    (unsigned long) fc);
        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);

        for_each_possible_cpu(i)
                flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));

        fc->hotcpu_notifier = (struct notifier_block){
                .notifier_call = flow_cache_cpu,
        };
        register_hotcpu_notifier(&fc->hotcpu_notifier);

        return 0;
}
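
/* Module init: create the entry slab (SLAB_PANIC, so no error path)
 * and bring up the global cache. */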
static int __init flow_cache_init_global(void)
{
        flow_cachep = kmem_cache_create("flow_cache",
                                        sizeof(struct flow_cache_entry),
                                        0, SLAB_PANIC, NULL);

        return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);