/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Pavel Emelyanov <xemul@openvz.org>
 *		Started as consolidation of ipv4/ip_fragment.c,
 *		ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

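/* Usage sketch (illustrative, not part of this file): a reassembler is
 * expected to OR the IPFRAG_ECN_* value of each received fragment into a
 * per-queue field and consult this table once, when the datagram is rebuilt.
 * The field and label names below are assumptions for the example only:
 *
 *	u8 ecn = ip_frag_ecn_table[qp->ecn];
 *	if (ecn == 0xff)
 *		goto drop_datagram;	(invalid combination, RFC 3168 5.3)
 *	iph->tos |= ecn;		(0 or INET_ECN_CE)
 */
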
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	/* Per bucket lock NOT needed here, due to write lock protection */
	write_lock(&f->lock);

	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];
				hlist_add_head(&q->list, &hb_dest->chain);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

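/* Locking: inet_frag_secret_rebuild() takes f->lock for writing, which keeps
 * all other hash users out while the seed changes.  Every other path in this
 * file takes f->lock for reading and then the per-bucket hb->chain_lock, so
 * chain walks never race with a rehash.
 */
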
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}
	rwlock_init(&f->lock);

	f->rnd = (u32) ((totalram_pages ^ (totalram_pages >> 7)) ^
			(jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

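/* Registration sketch (illustrative): a protocol fills in the ops of its own
 * struct inet_frags, registers it once at init time, and then sets up each
 * network namespace.  The names here are assumptions for the example only:
 *
 *	static struct inet_frags ip4_frags;
 *
 *	ip4_frags.hashfn	  = ip4_hashfn;
 *	ip4_frags.constructor	  = ip4_frag_init;
 *	ip4_frags.match		  = ip4_frag_match;
 *	ip4_frags.frag_expire	  = ip_expire;
 *	ip4_frags.qsize		  = sizeof(struct ipq);
 *	ip4_frags.secret_interval = 10 * 60 * HZ;
 *	inet_frags_init(&ip4_frags);
 *
 *	inet_frags_init_net(&net->ipv4.frags);	(once per namespace)
 */
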
void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	init_frag_mem_limit(nf);
	INIT_LIST_HEAD(&nf->lru_list);
	spin_lock_init(&nf->lru_lock);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;
	unsigned int hash;

	read_lock(&f->lock);
	hash = f->hashfn(fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_del(&fq->list);
	spin_unlock(&hb->chain_lock);

	read_unlock(&f->lock);
	inet_frag_lru_del(fq);
}

void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

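/* Reference counting: a live queue holds one reference for its hash chain and
 * one while its timer is pending; inet_frag_kill() drops both.  The reference
 * held by the current caller is dropped later through inet_frag_put(), which
 * is what ultimately triggers inet_frag_destroy().
 */
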
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
		       int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;
	if (work)
		*work -= sum;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	if (!force) {
		if (frag_mem_limit(nf) <= nf->high_thresh)
			return 0;
	}

	work = frag_mem_limit(nf) - nf->low_thresh;
	while (work > 0 || force) {
		spin_lock(&nf->lru_lock);

		if (list_empty(&nf->lru_list)) {
			spin_unlock(&nf->lru_lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		/* Remove q from list to avoid several CPUs grabbing it */
		list_del_init(&q->lru_list);

		spin_unlock(&nf->lru_lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

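/* Eviction budget: "work" starts as the number of bytes above low_thresh, and
 * inet_frag_destroy() subtracts each freed queue's footprint from it, so the
 * loop above stops once memory use drops below the low watermark (or, with
 * force, once the LRU list is empty).
 */
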
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *qp;
	unsigned int hash;

	read_lock(&f->lock); /* Protects against hash rebuild */
	/*
	 * While we stayed w/o the lock other CPU could update
	 * the rnd seed, so we need to re-calculate the hash
	 * chain. Fortunately the qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
	hb = &f->hash[hash];
	spin_lock(&hb->chain_lock);

#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could be created on other cpu, while we
	 * released the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			/* Somebody else already interned a matching queue;
			 * drop qp_in and return the existing one instead.
			 */
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);	/* reference for the timer */

	atomic_inc(&qp->refcnt);		/* reference for the hash chain */
	hlist_add_head(&qp->list, &hb->chain);
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);
	inet_frag_lru_add(nf, qp);
	return qp;
}

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(q, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);
	INIT_LIST_HEAD(&q->lru_list);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);
	else
		return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

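/* Caller sketch (illustrative): the caller hashes its key with f->lock held
 * for reading; inet_frag_find() releases that lock on every path and returns
 * either a referenced queue, NULL, or an ERR_PTR.  Names here are assumptions
 * for the example only:
 *
 *	read_lock(&frags.lock);
 *	hash = my_hashfn(&key, frags.rnd);
 *	q = inet_frag_find(&net->frags, &frags, &key, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	... use q, then inet_frag_put(q, &frags) when done ...
 */
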
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);