/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
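
/* Example: a datagram whose fragments were all ECT(0) except one
 * CE-marked fragment indexes this table at (IPFRAG_ECN_CE |
 * IPFRAG_ECN_ECT_0) and gets INET_ECN_CE ORed into the final tos;
 * mixing a NOT-ECT fragment with any ECT/CE fragment yields 0xff and
 * the reassembled frame is dropped, per RFC 3168 5.3.
 */

/* Timer callback: re-seed the hash secret and relink every queue into
 * the bucket the new seed hashes it to, so bucket placement cannot be
 * predicted (and a single chain flooded) from long observation.  The
 * f->lock write lock keeps lookups out while chains are relinked.
 */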
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	/* Per bucket lock NOT needed here, due to write lock protection */
	write_lock(&f->lock);

	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];
				hlist_add_head(&q->list, &hb_dest->chain);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}
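
/* Called once per protocol (e.g. from the IPv4 or IPv6 reassembly
 * setup code) to prepare the shared hash table, its per-bucket locks
 * and the periodic secret-rebuild timer.
 */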
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}
	rwlock_init(&f->lock);

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);
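
/* Per-namespace counterpart of inet_frags_init(): resets the queue
 * count, the fragment memory accounting and the LRU list that the
 * evictor walks.
 */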
void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	init_frag_mem_limit(nf);
	INIT_LIST_HEAD(&nf->lru_list);
	spin_lock_init(&nf->lru_lock);
}
EXPORT_SYMBOL(inet_frags_init_net);
void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);
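
/* Tear down all queues of a namespace: dropping low_thresh to 0 makes
 * the forced evictor below reclaim until the LRU list is empty, after
 * which the memory counter can be destroyed.
 */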
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);
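
/* Unhash a queue: f->lock is taken for reading (it only guards against
 * a concurrent secret rebuild changing the bucket layout), while the
 * per-bucket spinlock serializes the chain update against other
 * writers.
 */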
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;
	unsigned int hash;

	read_lock(&f->lock);
	hash = f->hashfn(fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_del(&fq->list);
	spin_unlock(&hb->chain_lock);

	read_unlock(&f->lock);
	inet_frag_lru_del(fq);
}
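
/* Mark a queue dead: drop the timer's reference if the timer was still
 * pending, then unhash and drop the hash table's reference.  The
 * INET_FRAG_COMPLETE flag keeps this from running twice.
 */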
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}
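
/* Final teardown once the last reference is gone: free every fragment
 * skb, return the truesize total plus the queue descriptor size to the
 * memory accounting (and to the evictor's work budget, if one was
 * passed in), then let the protocol destructor release any
 * protocol-private state.
 */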
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;
	if (work)
		*work -= sum;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);
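
/* Reclaim queues in LRU order until memory use drops below low_thresh
 * (or until the list is empty, when forced).  Each victim is unlinked
 * from the LRU list under nf->lru_lock first, so concurrent evictors
 * cannot pick the same queue.
 */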
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	if (!force) {
		if (frag_mem_limit(nf) <= nf->high_thresh)
			return 0;
	}

	work = frag_mem_limit(nf) - nf->low_thresh;
	while (work > 0) {
		spin_lock(&nf->lru_lock);

		if (list_empty(&nf->lru_list)) {
			spin_unlock(&nf->lru_lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		/* Remove q from list to avoid several CPUs grabbing it */
		list_del_init(&q->lru_list);

		spin_unlock(&nf->lru_lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);
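
/* Insert a freshly allocated queue into the hash.  Because the caller
 * dropped f->lock after its failed lookup, the secret may have been
 * rebuilt and, on SMP, another CPU may have interned an identical
 * queue first; in that case the existing queue wins and qp_in is
 * discarded.  On success the queue holds one reference for the hash
 * chain, one for the expiry timer and one for the caller.
 */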
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *qp;
	unsigned int hash;

	read_lock(&f->lock); /* Protects against hash rebuild */
	/*
	 * While we stayed without the lock, another CPU could update
	 * the rnd seed, so we need to re-calculate the hash chain.
	 * Fortunately, qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
	hb = &f->hash[hash];
	spin_lock(&hb->chain_lock);

#ifdef CONFIG_SMP
	/* With the SMP race we have to recheck the hash table, because
	 * such an entry could have been created on another CPU while we
	 * did not hold the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);
	inet_frag_lru_add(nf, qp);
	return qp;
}
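
/* Allocate and initialise a queue descriptor.  GFP_ATOMIC because this
 * runs from packet-reception context; f->qsize covers the protocol's
 * enclosing structure (e.g. struct ipq), of which inet_frag_queue is
 * the first member.
 */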
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(q, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);
	INIT_LIST_HEAD(&q->lru_list);

	return q;
}
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}
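
/* Look up a queue by key, creating one on a miss.  Locking contract:
 * the caller enters with f->lock read-held (the hash must be computed
 * under it, against secret rebuilds) and this function releases it on
 * every path, as the __releases() annotation documents.  Chains longer
 * than INETFRAGS_MAXDEPTH are treated as a bucket under attack and the
 * fragment is refused with -ENOBUFS.
 *
 * Usage sketch, modelled on the IPv4 caller in ipv4/ip_fragment.c
 * (abbreviated, not a verbatim copy):
 *
 *	read_lock(&ip4_frags.lock);
 *	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	return container_of(q, struct ipq, q);
 */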
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);
	else
		return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);