/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <linux/security.h>

struct flow_cache_entry {
	union {
		struct hlist_node	hlist;
		struct list_head	gc_list;
	} u;
	u16				family;
	u8				dir;
	u32				genid;
	struct flowi			key;
	struct flow_cache_object	*object;
};

struct flow_cache_percpu {
	struct hlist_head		*hash_table;
	int				hash_count;
	u32				hash_rnd;
	int				hash_rnd_recalc;
	struct tasklet_struct		flush_tasklet;
};

struct flow_flush_info {
	struct flow_cache		*cache;
	atomic_t			cpuleft;
	struct completion		completion;
};

struct flow_cache {
	u32				hash_shift;
	struct flow_cache_percpu __percpu *percpu;
	struct notifier_block		hotcpu_notifier;
	int				low_watermark;
	int				high_watermark;
	struct timer_list		rnd_timer;
};
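
/*
 * flow_cache_genid is a global generation counter: callers of the flow
 * cache bump it (it is exported below) to invalidate every cached object
 * at once.  Entries tagged with an older generation fail flow_entry_valid()
 * and are re-resolved or garbage collected lazily on a later lookup.
 */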
atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep __read_mostly;

static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)

static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

static int flow_entry_valid(struct flow_cache_entry *fle)
{
	if (atomic_read(&flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
}

static void flow_cache_gc_task(struct work_struct *work)
{
	struct list_head gc_list;
	struct flow_cache_entry *fce, *n;

	INIT_LIST_HEAD(&gc_list);
	spin_lock_bh(&flow_cache_gc_lock);
	list_splice_tail_init(&flow_cache_gc_list, &gc_list);
	spin_unlock_bh(&flow_cache_gc_lock);

	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
		flow_entry_kill(fce);
}
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);

static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
				     int deleted, struct list_head *gc_list)
{
	if (deleted) {
		fcp->hash_count -= deleted;
		spin_lock_bh(&flow_cache_gc_lock);
		list_splice_tail(gc_list, &flow_cache_gc_list);
		spin_unlock_bh(&flow_cache_gc_lock);
		schedule_work(&flow_cache_gc_work);
	}
}

static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle;
	struct hlist_node *entry, *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		hlist_for_each_entry_safe(fle, entry, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle)) {
				saved++;
			} else {
				deleted++;
				hlist_del(&fle->u.hlist);
				list_add_tail(&fle->u.gc_list, &gc_list);
			}
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);
}
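
/*
 * Note on shrink_to: __flow_cache_shrink() keeps at most shrink_to valid
 * entries in each hash bucket and queues everything else for deferred
 * freeing, so shrink_to == 0 empties the per-cpu table entirely.  That is
 * what flow_new_hash_rnd() and the CPU_DEAD notifier below rely on.
 */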

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  const struct flowi *key,
			  size_t keysize)
{
	const u32 *k = (const u32 *) key;
	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

	return jhash2(k, length, fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
			    size_t keysize)
{
	const flow_compare_t *k1, *k1_lim, *k2;

	k1 = (const flow_compare_t *) key1;
	k1_lim = k1 + keysize;

	k2 = (const flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}
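
/*
 * The word-wise loop above behaves like
 *
 *	memcmp(key1, key2, keysize * sizeof(flow_compare_t)) != 0
 *
 * but can compare flow_compare_t (unsigned long) sized chunks at a time:
 * flow_key_size() hands us the key length as a whole number of naturally
 * aligned flow_compare_t words, never in bytes.
 */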

struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, *tfle;
	struct hlist_node *entry;
	struct flow_cache_object *flo;
	size_t keysize;
	unsigned int hash;

	local_bh_disable();
	fcp = this_cpu_ptr(fc->percpu);

	fle = NULL;
	flo = NULL;

	keysize = flow_key_size(family);
	if (!keysize)
		goto nocache;

	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key, keysize);
	hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
		if (tfle->family == family &&
		    tfle->dir == dir &&
		    flow_key_compare(key, &tfle->key, keysize) == 0) {
			fle = tfle;
			break;
		}
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
			fle->object = NULL;
			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (flo && !IS_ERR(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);
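
/*
 * Sketch of the lookup contract, as implemented above: the caller (the
 * xfrm policy/bundle cache, for example) supplies a flow_resolve_t
 * callback.  On a miss, a stale entry, or when the cache is unusable,
 * resolver(net, key, family, dir, old_object_or_NULL, ctx) is invoked and
 * its return value is cached unless it is an ERR_PTR.  On a hit with a
 * current generation the cached object is returned after taking a
 * reference through object->ops->get(); dead objects are dropped with
 * object->ops->delete(), and per-entry validity is rechecked with
 * object->ops->check() in flow_entry_valid().
 */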

static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *entry, *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, entry, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	int cpu;
	struct tasklet_struct *tasklet;

	cpu = smp_processor_id();
	tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}
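
/*
 * How a full flush works: flow_cache_flush() IPIs every other online CPU
 * through smp_call_function(); each CPU schedules its local flush_tasklet,
 * which prunes that CPU's hash table and decrements info->cpuleft.  The
 * initiating CPU runs the tasklet body directly, then sleeps on the
 * completion until the last CPU has finished.
 */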

void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	info.cache = &flow_cache_global;
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
}

static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
				    unsigned long action,
				    void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
	int res, cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		res = flow_cache_cpu_prepare(fc, cpu);
		if (res)
			return notifier_from_errno(res);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__flow_cache_shrink(fc, fcp, 0);
		break;
	}
	return NOTIFY_OK;
}

static int __init flow_cache_init(struct flow_cache *fc)
{
	int i;

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	for_each_online_cpu(i) {
		if (flow_cache_cpu_prepare(fc, i))
			return -ENOMEM;
	}
	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	register_hotcpu_notifier(&fc->hotcpu_notifier);

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;
}

static int __init flow_cache_init_global(void)
{
	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC, NULL);

	return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);